def get_cv(org_id, target_env, env_list, prior_list, promote_list):
    """Get the content views"""

    # Find the ID of the environment we are promoting to and from
    if target_env not in env_list:
        msg = "Target environment '" + target_env + "' not found"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)
    else:
        target_env_id = env_list[target_env]
        source_env_id = prior_list[target_env_id]

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = {}
    ver_descr = {}
    ver_version = {}

    for cv_result in cvs['results']:
        # We will never promote to/from the DOV
        if cv_result['name'] != "Default Organization View":
            # Handle specific includes and excludes
            if promote_list and cv_result['name'] not in promote_list:
                msg = "Skipping content view '" + cv_result['name'] + "'"
                helpers.log_msg(msg, 'DEBUG')
                continue

            # Get the ID of each Content View
            msg = "Processing content view '" + cv_result['name'] + "'"
            helpers.log_msg(msg, 'DEBUG')

            # Find the current version of the view in the env we are coming from
            for ver in cv_result['versions']:
                msg = " Found in env_id " + str(ver['environment_ids']) + " view_id " +\
                    str(ver['id'])
                helpers.log_msg(msg, 'DEBUG')

                if source_env_id in ver['environment_ids']:
                    # Extract the name of the source environment so we can inform the user
                    for key, val in env_list.items():
                        if val == source_env_id:
                            prior_env = key
                    msg = "Found promotable version " + ver['version'] + " of '" +\
                        cv_result['name'] + "' in " + prior_env
                    helpers.log_msg(msg, 'INFO')
                    print msg

                    # Create a dictionary of CV IDs and the CV vers ID to promote
                    ver_list[cv_result['id']] = ver['id']
                    ver_descr[cv_result['id']] = cv_result['name']
                    ver_version[cv_result['id']] = ver['version']

    return ver_list, ver_descr, ver_version
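# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values, not part of the original script):
# the shapes get_cv() expects and returns. 'env_list' maps lifecycle
# environment names to IDs, and 'prior_list' maps each environment ID to the
# ID of the environment immediately before it in the promotion path.
def demo_get_cv_shapes():
    env_list = {'Library': 1, 'Development': 2, 'Quality': 3, 'Production': 4}
    prior_list = {2: 1, 3: 2, 4: 3}
    # Promoting to Quality therefore looks for versions currently in Development:
    #   (ver_list, ver_descr, ver_version) = get_cv(
    #       org_id, 'Quality', env_list, prior_list, ['cv-rhel7'])
    # ver_list    -> {12: 345}         (CV ID -> promotable CV *version* ID)
    # ver_descr   -> {12: 'cv-rhel7'}  (CV ID -> CV name)
    # ver_version -> {12: '5.0'}       (CV ID -> version string)
    return env_list, prior_list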
def export_cv(dov_ver, last_export, export_type):
    """
    Export Content View
    Takes the content view version and a start time (API 'since' value)
    """
    if export_type == 'full':
        msg = "Exporting complete DOV version " + str(dov_ver)
    else:
        msg = "Exporting DOV version " + str(dov_ver) + " from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    try:
        if export_type == 'full':
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export", \
                json.dumps(
                    {
                    }
                ))["id"]
        else:
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export/", \
                json.dumps(
                    {
                        "since": last_export,
                    }
                ))["id"]
    except: # pylint: disable-msg=W0702
        msg = "Unable to start export - Conflicting Sync or Export already in progress"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure"
            helpers.mailout(subject, output)
        sys.exit(1)

    # Trap some other error conditions
    if "Required lock is already taken" in str(task_id):
        msg = "Unable to start export - Sync in progress"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure"
            helpers.mailout(subject, output)
        sys.exit(1)

    msg = "Export started, task_id = " + str(task_id)
    helpers.log_msg(msg, 'DEBUG')

    return str(task_id)
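# NOTE: helpers.post_json() is defined elsewhere in this project. A minimal
# sketch of the kind of wrapper it is assumed to be - an authenticated POST
# that returns the decoded JSON reply - is below. The function name, default
# credentials and certificate handling here are assumptions, not the
# project's actual implementation.
import requests

def post_json_sketch(url, json_data, username='admin', password='changeme'):
    """POST a JSON string to a Satellite API endpoint and return the parsed reply."""
    response = requests.post(
        url, data=json_data, auth=(username, password),
        verify=False,  # Satellite commonly runs with a self-signed certificate
        headers={'content-type': 'application/json'})
    response.raise_for_status()
    return response.json()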
def do_gpg_check(export_dir):
    """
    Find and GPG Check all RPM files
    """
    msg = "Checking GPG integrity of exported RPMs..."
    helpers.log_msg(msg, 'INFO')
    output = "{:<70}".format(msg)
    print output[:70],
    # Force the status message to be shown to the user
    sys.stdout.flush()

    badrpms = []
    os.chdir(export_dir)
    for rpm in locate("*.rpm"):
        return_code = subprocess.call("rpm -K " + rpm, shell=True,
                                      stdout=open(os.devnull, 'wb'))
        # A non-zero return code indicates a GPG check failure.
        if return_code != 0:
            # For display purposes, strip the first 6 directory elements
            rpmnew = os.path.join(*(rpm.split(os.path.sep)[6:]))
            badrpms.append(rpmnew)

    # If we have any bad ones we need to fail the export.
    if len(badrpms) != 0:
        print helpers.RED + "GPG Check FAILED" + helpers.ENDC
        msg = "The following RPMs failed the GPG check:"
        helpers.log_msg(msg, 'ERROR')
        for badone in badrpms:
            msg = badone
            helpers.log_msg(msg, 'ERROR')
        msg = "------ Export Aborted ------"
        helpers.log_msg(msg, 'INFO')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure - GPG checksum failure"
            message = "GPG check of exported RPMs failed. Check logs for details\n\n" + output
            helpers.mailout(subject, message)
        sys.exit(1)
    else:
        msg = "GPG check completed successfully"
        helpers.log_msg(msg, 'INFO')
        print helpers.GREEN + "GPG Check - Pass" + helpers.ENDC
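# NOTE: do_gpg_check() relies on a locate() helper that is not shown in this
# section. A minimal sketch of what such a helper likely looks like - a
# recursive, fnmatch-based file generator - follows; the real script may
# differ.
import fnmatch
import os

def locate(pattern, root=os.curdir):
    """Yield every file path beneath 'root' whose basename matches 'pattern'."""
    for path, dirs, files in os.walk(os.path.abspath(root)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)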
def get_inputfiles(dataset):
    """
    Verify the input files exist and are valid.
    'dataset' is a date (YYYYMMDD-HHMM_ENV) provided by the user - date is in the filename of the archive
    Returned 'basename' is the full export filename (sat6_export_YYYYMMDD-HHMM_ENV)
    """
    basename = 'sat6_export_' + dataset
    shafile = basename + '.sha256'
    if not os.path.exists(helpers.IMPORTDIR + '/' + shafile):
        msg = "Cannot continue - missing sha256sum file " + helpers.IMPORTDIR + '/' + shafile
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Verify the checksum of each part of the import
    os.chdir(helpers.IMPORTDIR)
    msg = 'Verifying Checksums in ' + helpers.IMPORTDIR + '/' + shafile
    helpers.log_msg(msg, 'INFO')
    print msg
    result = os.system('sha256sum -c ' + shafile)

    # Return code from sha256sum is 0 if all is fine.
    if result != 0:
        msg = "Import Aborted - Tarfile checksum verification failed"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # We're good
    msg = "Tarfile checksum verification passed"
    helpers.log_msg(msg, 'INFO')
    print helpers.GREEN + "Checksum verification - Pass" + helpers.ENDC

    return basename
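# Illustrative alternative (not in the original script): the same manifest
# check done in pure Python with hashlib rather than shelling out to
# 'sha256sum -c'. Assumes the manifest uses the standard
# '<hexdigest>  <filename>' format that sha256sum emits.
import hashlib

def verify_sha256_manifest(shafile):
    """Return True if every file listed in 'shafile' matches its checksum."""
    ok = True
    with open(shafile) as manifest:
        for line in manifest:
            expected, filename = line.strip().split(None, 1)
            sha = hashlib.sha256()
            with open(filename, 'rb') as part:
                # Hash in 1 MiB blocks to avoid reading huge tar parts into memory
                for block in iter(lambda: part.read(1 << 20), b''):
                    sha.update(block)
            if sha.hexdigest() != expected:
                ok = False
    return ok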
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Publishes content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Publish ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be published',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-c', '--comment', help="Add a custom description", required=False)
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates",
        required=False, action="store_true")
    args = parser.parse_args()

    # Log the fact we are starting
    if not args.last:
        msg = "-------- Content view publish started by " + runuser + " -----------"
        helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun

    # Set up the description that will be added to the published version
    if args.comment:
        description = args.comment
    else:
        description = "Published by " + runuser + "\n via API script"

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    publish_list = []
    if not args.all:
        publish_list = helpers.CONFIG['publish']['content_views']

        if not publish_list:
            msg = "Cannot find publish configuration"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CVs " + str(publish_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, publish_list)

    # Publish the content views. Returns a list of task IDs.
    publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, args.quiet)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory['Library'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Publish completed successfully\n\n" + output
        subject = "Satellite 6 publish completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
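# Illustrative sketch (hypothetical values, not part of the original script):
# the promotions.pkl history written above is a plain dict of environment
# name -> date of the last successful publish/promote, which is exactly what
# the --last option iterates over and prints.
import pickle

def demo_promotion_history(path='var/promotions.pkl'):
    """Write and re-read a promotion history dict like the one main() keeps."""
    phistory = {'Library': '2017-03-01', 'Development': '2017-02-27'}
    pickle.dump(phistory, open(path, 'wb'))
    return pickle.load(open(path, 'rb'))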
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Promotes content views for specified organization to the target environment.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-e', '--env', help='Target Environment (e.g. Development, Quality, Production)',
        required=False)
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Promote ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be promoted',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates",
        required=False, action="store_true")
    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view promotion started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    target_env = args.env
    dry_run = args.dryrun

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    # Error if no environment to promote to is given
    if args.env is None:
        parser.error('--env is required')

    promote_list = []
    if not args.all:
        for x in helpers.CONFIG['promotion']:
            if x == 'batch':
                continue
            if helpers.CONFIG['promotion'][x]['name'] == target_env:
                promote_list = helpers.CONFIG['promotion'][x]['content_views']

        if not promote_list:
            msg = "Cannot find promotion configuration for '" + target_env + "'"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CVs " + str(promote_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Now, let's fetch all available lifecycle environments for this org...
    (env_list, prior_list) = get_envs(org_id)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, target_env, env_list, prior_list,
        promote_list)

    # Promote to the given environment. Returns a list of task IDs.
    promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run,
        args.quiet)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory[target_env] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Promotion completed successfully\n\n" + output
        subject = "Satellite 6 promotion completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
def promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run, quiet):
    """Promote Content View"""
    target_env_id = env_list[target_env]
    source_env_id = prior_list[target_env_id]

    # Extract the name of the source environment so we can inform the user
    for key, val in env_list.items():
        if val == source_env_id:
            prior_env = key

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Promotion from " + prior_env + " to " + target_env

    # Now we have all the info needed, we can actually trigger the promotion.
    # Loop through each CV with promotable versions
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching promotion criteria
    if not ver_list:
        msg = "No content view versions found matching promotion criteria"
        helpers.log_msg(msg, 'WARNING')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)

    # Break repos to promote into batches as configured in config.yml
    cvchunks = [ver_list.keys()[i:i + helpers.PROMOTEBATCH]
        for i in range(0, len(ver_list), helpers.PROMOTEBATCH)]

    # Loop through the smaller subsets of repo id's
    for chunk in cvchunks:
        for cvid in chunk:
            # Check if there is a publish/promote already running on this content view
            locked = helpers.check_running_publish(cvid, ver_descr[cvid])

            if not locked:
                msg = "Promoting '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) +\
                    " from " + prior_env + " to " + str(target_env)
                helpers.log_msg(msg, 'INFO')
                print helpers.HEADER + msg + helpers.ENDC

            if not dry_run and not locked:
                try:
                    task_id = helpers.post_json(
                        helpers.KATELLO_API + "content_view_versions/" + str(ver_list[cvid]) +\
                        "/promote/", json.dumps(
                            {
                                "environment_id": target_env_id
                            }
                        ))["id"]
                except Warning:
                    msg = "Failed to initiate promotion of " + str(ver_descr[cvid])
                    helpers.log_msg(msg, 'WARNING')
                else:
                    task_list.append(task_id)
                    ref_list[task_id] = ver_descr[cvid]

        # Exit in the case of a dry-run
        if dry_run:
            msg = "Dry run - not actually performing promotion"
            helpers.log_msg(msg, 'WARNING')
        else:
            # Monitor the status of the promotion tasks
            helpers.watch_tasks(task_list, ref_list, task_name, quiet)

    # Exit in the case of a dry-run
    if dry_run:
        sys.exit(2)
    else:
        return
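# Worked example (hypothetical values) of the PROMOTEBATCH chunking used
# above: five promotable CVs with a batch size of two yield three chunks,
# and watch_tasks() runs after each batch so no more than two promotions are
# in flight at once. Note the Python 2 idiom: dict.keys() returns a list, so
# it can be sliced directly.
def demo_chunking():
    ver_list = {101: 1, 102: 2, 103: 3, 104: 4, 105: 5}
    batch = 2
    chunks = [ver_list.keys()[i:i + batch]
        for i in range(0, len(ver_list), batch)]
    return chunks  # e.g. [[101, 102], [103, 104], [105]] (dict order may vary)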
def publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, quiet, forcemeta):
    """Publish Content View"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Publish content view to Library"

    # Now we have all the info needed, we can actually trigger the publish.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching publish criteria
    if not ver_list:
        msg = "No content view versions found matching publication criteria"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)

    # Break repos to publish into batches as configured in config.yml
    cvchunks = [ver_list.keys()[i:i + helpers.PUBLISHBATCH]
        for i in range(0, len(ver_list), helpers.PUBLISHBATCH)]

    # Loop through the smaller subsets of repo id's
    for chunk in cvchunks:
        for cvid in chunk:
            # Check if there is a publish/promote already running on this content view
            locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])

            if not locked:
                msg = "Publishing '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) + ".0"
                helpers.log_msg(msg, 'INFO')
                print helpers.HEADER + msg + helpers.ENDC

            if not dry_run and not locked:
                try:
                    task_id = helpers.post_json(
                        helpers.KATELLO_API + "content_views/" + str(ver_list[cvid]) +\
                        "/publish", json.dumps(
                            {
                                "description": description,
                                "force_yum_metadata_regeneration": str(forcemeta)
                            }
                        ))["id"]
                except (Warning, KeyError):
                    msg = "Failed to initiate publication of " + str(ver_descr[cvid])
                    helpers.log_msg(msg, 'WARNING')
                else:
                    task_list.append(task_id)
                    ref_list[task_id] = ver_descr[cvid]

        # Notify user in the case of a dry-run
        if dry_run:
            msg = "Dry run - not actually performing publish"
            helpers.log_msg(msg, 'WARNING')
        else:
            # Wait for the tasks to finish
            helpers.watch_tasks(task_list, ref_list, task_name, quiet)

    # Exit in the case of a dry-run
    if dry_run:
        sys.exit(2)
    else:
        return
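# Hypothetical usage sketch for publish() above. The three dicts are keyed by
# content view ID as returned by this script's get_cv(); all values here are
# invented. A dry run only logs what would happen and exits with code 2.
#
#   ver_list    = {12: 12}           # CV ID -> CV ID (the publish POST targets the CV itself)
#   ver_descr   = {12: 'cv-rhel7'}
#   ver_version = {12: 5}
#   publish(ver_list, ver_descr, ver_version, True, 'svc-api',
#       'Published via API script', False, False)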
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Publishes content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Publish ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be published',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-c', '--comment', help="Add a custom description", required=False)
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates",
        required=False, action="store_true")
    parser.add_argument('-m', '--forcemeta', help="Force metadata regeneration",
        required=False, action="store_true")
    args = parser.parse_args()

    # Log the fact we are starting
    if not args.last:
        msg = "-------- Content view publish started by " + runuser + " -----------"
        helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun

    # Set up the description that will be added to the published version
    if args.comment:
        description = args.comment
    else:
        description = "Published by " + runuser + "\n via API script"

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    publish_list = []
    if not args.all:
        publish_list = helpers.CONFIG['publish']['content_views']

        if not publish_list:
            msg = "Cannot find publish configuration"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CVs " + str(publish_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, publish_list)

    # Publish the content views. Returns a list of task IDs.
    publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, args.quiet,
        args.forcemeta)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory['Library'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Publish completed successfully\n\n" + output
        subject = "Satellite 6 publish completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
def main(args):
    """Perform import of Default Content View."""
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-d', '--dataset', \
        help='Date/name of Import dataset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n', '--nosync', help='Do not trigger a sync after extracting content',
        required=False, action="store_true")
    parser.add_argument('-r', '--remove', help='Remove input files after import has completed',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display the last successful import performed',
        required=False, action="store_true")
    parser.add_argument('-L', '--list', help='List all successfully completed imports',
        required=False, action="store_true")
    parser.add_argument('-c', '--count', help='Display all package counts after import',
        required=False, action="store_true")
    parser.add_argument('-f', '--force', help='Force import of data if it has previously been done',
        required=False, action="store_true")
    parser.add_argument('-u', '--unattended', help='Answer any prompts safely, allowing automated usage',
        required=False, action="store_true")
    parser.add_argument('--fixhistory', help='Force import history to match export history',
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dataset = args.dataset

    if args.fixhistory:
        fixhistory = True
    else:
        fixhistory = False

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    imports = []
    # Read the last imports data
    if os.path.exists(vardir + '/imports.pkl'):
        imports = pickle.load(open(vardir + '/imports.pkl', 'rb'))
        # If we have a string we convert to a list. This should only occur the first time we
        # migrate from the original string version of the pickle.
        if type(imports) is str:
            imports = imports.split()
        last_import = imports[-1]

    # Display the last successful import(s)
    if args.last or args.list:
        if os.path.exists(vardir + '/imports.pkl'):
            if args.last:
                msg = "Last successful import was " + last_import
                helpers.log_msg(msg, 'INFO')
                print msg
            if args.list:
                print "Completed imports:\n----------------"
                for item in imports:
                    print item
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print msg
        sys.exit(0)

    # If we got this far without -d being specified, error out cleanly
    if args.dataset is None:
        parser.error("--dataset is required")

    # If we have already imported this dataset let the user know
    if dataset in imports:
        if not args.force:
            msg = "Dataset " + dataset + " has already been imported. Use --force if you really want to do this."
            helpers.log_msg(msg, 'WARNING')
            sys.exit(2)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(dataset)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Read in the export history from the input dataset
    dsname = dataset.split('_')[1]
    exports = pickle.load(open(helpers.IMPORTDIR + '/exporthistory_' + dsname + '.pkl', 'rb'))

    # Check for and let the user decide if they want to continue with missing imports
    missing_imports = check_missing(imports, exports, dataset, fixhistory, vardir)
    if missing_imports:
        msg = "Run sat_import with the --fixhistory flag to reset the import history to this export"
        helpers.log_msg(msg, 'INFO')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with import?", "no")
            if not answer:
                msg = "Import Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Import continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Import Aborted"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FI, output)
            sys.exit(3)

    # Trigger a sync of the content into the Library
    if args.nosync:
        #print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = True
        # No sync means no new repos can have been detected; define this so the
        # mailout below cannot raise a NameError.
        newrepos = False
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))
        package_count = pickle.load(open('package_count.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override, newrepos) = sync_content(org_id, imported_repos)

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

        # Verify the repository package/erratum counts match the sync host
        check_counts(org_id, package_count, args.count)

    if os.path.exists(helpers.IMPORTDIR + '/puppetforge'):
        print 'Offline puppet-forge-server bundle is available to import separately in '\
            + helpers.IMPORTDIR + '/puppetforge\n'

    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print msg
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + dataset + "*")
        os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")
        excode = 0
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 2
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 0

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data (append to existing pickle)
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    imports.append(dataset)
    pickle.dump(imports, open(vardir + '/imports.pkl', "wb"))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        if missing_imports:
            message = "Import of dataset " + dataset + " completed successfully.\n\n" \
                "Missing datasets were detected during the import - please check the logs\n\n" + output
            subject = "Satellite 6 import completed: Missing datasets"
        elif newrepos:
            message = "Import of dataset " + dataset + " completed successfully.\n\n" \
                "New repos found that need to be imported manually - please check the logs\n\n" + output
            subject = "Satellite 6 import completed: New repos require manual intervention"
        else:
            message = "Import of dataset " + dataset + " completed successfully\n\n" + output
            subject = "Satellite 6 import completed"
        helpers.mailout(subject, message)

    # And exit.
    sys.exit(excode)
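# Illustrative sketch (hypothetical values): the two pickles consumed by the
# sync step above travel inside the export tarball and are produced by the
# export script. Their expected shapes:
#
#   exported_repos.pkl -> ['Red_Hat_Enterprise_Linux_7_Server_RPMs_x86_64_7Server', 'EPEL_7']
#   package_count.pkl  -> {'EPEL_7': 12042, ...}   # repo label -> RPM count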
def publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, quiet):
    """Publish Content View"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Publish content view to Library"

    # Now we have all the info needed, we can actually trigger the publish.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching publish criteria
    if not ver_list:
        msg = "No content view versions found matching publication criteria"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)

    # Break repos to publish into batches as configured in config.yml
    cvchunks = [ver_list.keys()[i:i + helpers.PUBLISHBATCH]
        for i in range(0, len(ver_list), helpers.PUBLISHBATCH)]

    # Loop through the smaller subsets of repo id's
    for chunk in cvchunks:
        for cvid in chunk:
            # Check if there is a publish/promote already running on this content view
            locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])

            if not locked:
                msg = "Publishing '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) + ".0"
                helpers.log_msg(msg, 'INFO')
                print helpers.HEADER + msg + helpers.ENDC

            if not dry_run and not locked:
                try:
                    task_id = helpers.post_json(
                        helpers.KATELLO_API + "content_views/" + str(ver_list[cvid]) +\
                        "/publish", json.dumps(
                            {
                                "description": description
                            }
                        ))["id"]
                except Warning:
                    msg = "Failed to initiate publication of " + str(ver_descr[cvid])
                    helpers.log_msg(msg, 'WARNING')
                else:
                    task_list.append(task_id)
                    ref_list[task_id] = ver_descr[cvid]

        # Notify user in the case of a dry-run
        if dry_run:
            msg = "Dry run - not actually performing publish"
            helpers.log_msg(msg, 'WARNING')
        else:
            # Wait for the tasks to finish
            helpers.watch_tasks(task_list, ref_list, task_name, quiet)

    # Exit in the case of a dry-run
    if dry_run:
        sys.exit(2)
    else:
        return
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-e', '--env', help='Environment config', required=False)
    group.add_argument('-a', '--all', help='Export ALL content', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of content since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export content since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    parser.add_argument('-L', '--list', help='Display export history', required=False,
        action="store_true")
    parser.add_argument('--nogpg', help='Skip GPG checking', required=False,
        action="store_true")
    parser.add_argument('-u', '--unattended', help='Answer any prompts safely, allowing automated usage',
        required=False, action="store_true")
    parser.add_argument('--notar', help='Skip TAR creation', required=False,
        action="store_true")
    parser.add_argument('--forcexport', help='Force export on import-only satellite',
        required=False, action="store_true")
    parser.add_argument('-r', '--repodata', help='Include repodata for repos with no new packages',
        required=False, action="store_true")
    parser.add_argument('-p', '--puppetforge', help='Include puppet-forge-server format Puppet Forge repo',
        required=False, action="store_true")
    args = parser.parse_args()

    # If we are set as the 'DISCONNECTED' satellite, we will generally be IMPORTING content.
    if helpers.DISCONNECTED:
        if not args.forcexport:
            msg = "Export cannot be run on the disconnected Satellite host"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    since = args.since
    if args.puppetforge:
        pforge = True
    else:
        pforge = False

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    export_history = []
    package_count = {}

    # If a specific environment is requested, find and read that config file
    repocfg = os.path.join(confdir, 'exports.yml')
    if args.env:
        if not os.path.exists(repocfg):
            msg = 'Config file ' + confdir + '/exports.yml not found.'
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)
        cfg = yaml.safe_load(open(repocfg, 'r'))
        ename = args.env
        erepos = []
        validrepo = False
        for x in cfg['exports']:
            if cfg['exports'][x]['name'] == ename:
                validrepo = True
                erepos = cfg['exports'][x]['repos']
        if not validrepo:
            msg = 'Unable to find export config for ' + ename
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

        msg = "Specific environment export called for " + ename + "."
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = " - " + repo
            helpers.log_msg(msg, 'DEBUG')
    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'

    # Open the export history pickle so we can append to it
    if os.path.exists(vardir + '/exporthistory_' + ename + '.pkl'):
        export_history = pickle.load(open(vardir + '/exporthistory_' + ename + '.pkl', 'rb'))

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        repo = "{:<70}".format(time)
                        print repo[:70] + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(0)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
            # Display the full export history
            if args.list:
                if export_history:
                    print "Export history for " + ename + ":"
                    for item in export_history:
                        print item
                    sys.exit(0)
                else:
                    print "Export has never been performed for " + ename
                    sys.exit(0)
        else:
            # Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
                + str(since)

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    if args.env:
        msg = "------ " + ename + " Content export started by " + runuser + " ---------"
    helpers.log_msg(msg, 'INFO')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type, args.unattended)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
        json.dumps(
            {
                "organization_id": org_id,
                "per_page": '1000',
            }
        ))

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        cola = "Exporting DoV"
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            else:
                # To ensure we get ALL the packages reset the time to midnight on the last_export day
                last_export = last_export.split(' ')[0] + " 00:00:00"
            colb = "(INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
            colb = "(FULL)"
        msg = cola + " " + colb
        helpers.log_msg(msg, 'INFO')
        output = "{:<70}".format(cola)
        print output[:70] + ' ' + colb

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])
        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

    else:
        # Verify that defined repos exist in Satellite
        for repo in erepos:
            repo_in_sat = False
            for repo_x in repolist['results']:
                if re.findall("\\b" + repo + "\\b$", repo_x['label']):
                    repo_in_sat = True
                    break
            if not repo_in_sat:
                msg = "'" + repo + "' not found in Satellite"
                helpers.log_msg(msg, 'WARNING')

        # Process each repo
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        else:
                            # To ensure we get ALL the packages reset the time to midnight on the last_export day
                            last_export = last_export.split(' ')[0] + " 00:00:00"
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Count the number of packages
                        numpkg = count_packages(repo_result['id'])
                        package_count[repo_result['label']] = numpkg

                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export, export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                            # Count the number of exported packages
                            # First resolve the product label - this forms part of the export path
                            product = get_product(org_id, repo_result['product']['cp_id'])

                            # Now we can build the export path itself
                            basepath = helpers.EXPORTDIR + "/" + org_name + "-" + product + "-" + repo_result['label']
                            if export_type == 'incr':
                                basepath = basepath + "-incremental"
                            exportpath = basepath + "/" + repo_result['relative_path']
                            msg = "\nExport path = " + exportpath
                            helpers.log_msg(msg, 'DEBUG')

                            if not os.path.exists(exportpath):
                                msg = exportpath + " was not created.\nCheck permissions/SELinux on export dir"
                                helpers.log_msg(msg, 'ERROR')
                                if helpers.MAILOUT:
                                    helpers.tf.seek(0)
                                    output = "{}".format(helpers.tf.read())
                                    subject = "Satellite 6 export failure"
                                    helpers.mailout(subject, output)
                                sys.exit(1)

                            os.chdir(exportpath)
                            numrpms = len([f for f in os.walk(".").next()[2] if f[-4:] == ".rpm"])

                            msg = "Repository Export OK (" + str(numrpms) + " new packages)"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            if numrpms != 0 or args.repodata:
                                msg = "Adding " + repo_result['label'] + " to export list"
                                helpers.log_msg(msg, 'DEBUG')
                                exported_repos.append(repo_result['label'])
                            else:
                                msg = "Not including repodata for empty repo " + repo_result['label']
                                helpers.log_msg(msg, 'DEBUG')
                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type
                else:
                    msg = "Skipping " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            # Handle FILE type exports (ISO repos)
            elif repo_result['content_type'] == 'file':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        else:
                            # To ensure we get ALL the packages reset the time to midnight on the last_export day
                            last_export = last_export.split(' ')[0] + " 00:00:00"
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_iso(repo_result['id'], repo_result['label'],
                            repo_result['relative_path'], last_export, export_type)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time

                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result['label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result['label']
                            helpers.log_msg(msg, 'DEBUG')
                else:
                    msg = "Skipping " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            elif repo_result['content_type'] == 'puppet':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        else:
                            # To ensure we get ALL the packages reset the time to midnight on the last_export day
                            last_export = last_export.split(' ')[0] + " 00:00:00"
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_puppet(repo_result['id'], repo_result['label'],
                            repo_result['relative_path'], last_export, export_type, pforge)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time

                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result['label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result['label']
                            helpers.log_msg(msg, 'DEBUG')
                else:
                    msg = "Skipping " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

        # Combine resulting directory structures into a single repo format (top level = /content)
        prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos and the package counts. These will be transferred to
    # the disconnected system and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))
    pickle.dump(package_count, open(export_dir + '/package_count.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    if not args.nogpg:
        do_gpg_check(export_dir)

    # Copy in the manifest, if it has been downloaded
    export_manifest()

    # Add our exported data to a tarfile
    if not args.notar:
        create_tar(export_dir, ename, export_history)
    else:
        # We need to manually clean up a couple of working files from the export
        if os.path.exists(helpers.EXPORTDIR + "/iso"):
            shutil.rmtree(helpers.EXPORTDIR + "/iso")
        if os.path.exists(helpers.EXPORTDIR + "/puppet"):
            shutil.rmtree(helpers.EXPORTDIR + "/puppet")
        os.system("rm -f " + helpers.EXPORTDIR + "/*.pkl")
        os.system("rm -f " + export_dir + "/*.pkl")

        # Copy export_dir to cdn_export to prevent blowing it away next time we export
        copy_tree(export_dir, helpers.EXPORTDIR + "/cdn_export")

        # Cleanup
        shutil.rmtree(helpers.EXPORTDIR + "/cdn_export/manifest", ignore_errors=True, onerror=None)
        shutil.rmtree(export_dir)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open(vardir + '/exports_' + ename + '.pkl', "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    if not args.notar:
        print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
            ' to your disconnected Satellite system content import location.\n' \
            'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
            + helpers.ENDC + ' to extract it.'
    msg = "Export complete"
    helpers.log_msg(msg, 'INFO')

    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        subject = "Satellite 6 export complete"
        message = "Export of " + ename + " successfully completed\n\n" + output
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
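# NOTE: read_pickle() is called near the top of main() but is not shown in
# this section. A minimal sketch of its assumed behaviour - return the saved
# per-repo 'last export' timestamp dict if one exists, otherwise an empty
# dict so a full export is triggered - follows.
import os
import pickle

def read_pickle(ename):
    """Return the saved export timestamp dict for the given export config."""
    pklfile = os.path.join(vardir, 'exports_' + ename + '.pkl')
    if not os.path.exists(pklfile):
        return {}
    return pickle.load(open(pklfile, 'rb'))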
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Promotes content views for specified organization to the target environment.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-e', '--env', help='Target Environment (e.g. Development, Quality, Production)',
        required=False)
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Promote ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be promoted',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates",
        required=False, action="store_true")
    parser.add_argument('-m', '--forcemeta', help="Force metadata regeneration",
        required=False, action="store_true")
    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view promotion started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    target_env = args.env
    dry_run = args.dryrun

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    # Error if no environment to promote to is given
    if args.env is None:
        parser.error('--env is required')

    promote_list = []
    if not args.all:
        for x in helpers.CONFIG['promotion']:
            if x == 'batch':
                continue
            if helpers.CONFIG['promotion'][x]['name'] == target_env:
                promote_list = helpers.CONFIG['promotion'][x]['content_views']

        if not promote_list:
            msg = "Cannot find promotion configuration for '" + target_env + "'"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CVs " + str(promote_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Now, let's fetch all available lifecycle environments for this org...
    (env_list, prior_list) = get_envs(org_id)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, target_env, env_list, prior_list,
        promote_list)

    # Promote to the given environment. Returns a list of task IDs.
    promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run,
        args.quiet, args.forcemeta)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory[target_env] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Promotion completed successfully\n\n" + output
        subject = "Satellite 6 promotion completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
def promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run, quiet,
        forcemeta):
    """Promote Content View"""
    target_env_id = env_list[target_env]
    source_env_id = prior_list[target_env_id]

    # Extract the name of the source environment so we can inform the user
    for key, val in env_list.items():
        if val == source_env_id:
            prior_env = key

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Promotion from " + prior_env + " to " + target_env

    # Now we have all the info needed, we can actually trigger the promotion.
    # Loop through each CV with promotable versions
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching promotion criteria
    if not ver_list:
        msg = "No content view versions found matching promotion criteria"
        helpers.log_msg(msg, 'WARNING')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)

    # Break repos to promote into batches as configured in config.yml
    cvchunks = [ver_list.keys()[i:i + helpers.PROMOTEBATCH]
        for i in range(0, len(ver_list), helpers.PROMOTEBATCH)]

    # Loop through the smaller subsets of repo id's
    for chunk in cvchunks:
        for cvid in chunk:
            # Check if there is a publish/promote already running on this content view
            locked = helpers.check_running_publish(cvid, ver_descr[cvid])

            if not locked:
                msg = "Promoting '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) +\
                    " from " + prior_env + " to " + str(target_env)
                helpers.log_msg(msg, 'INFO')
                print helpers.HEADER + msg + helpers.ENDC

            if not dry_run and not locked:
                try:
                    task_id = helpers.post_json(
                        helpers.KATELLO_API + "content_view_versions/" + str(ver_list[cvid]) +\
                        "/promote/", json.dumps(
                            {
                                "environment_id": target_env_id,
                                "force_yum_metadata_regeneration": str(forcemeta)
                            }
                        ))["id"]
                except Warning:
                    msg = "Failed to initiate promotion of " + str(ver_descr[cvid])
                    helpers.log_msg(msg, 'WARNING')
                else:
                    task_list.append(task_id)
                    ref_list[task_id] = ver_descr[cvid]

        # Exit in the case of a dry-run
        if dry_run:
            msg = "Dry run - not actually performing promotion"
            helpers.log_msg(msg, 'WARNING')
        else:
            # Monitor the status of the promotion tasks
            helpers.watch_tasks(task_list, ref_list, task_name, quiet)

    # Exit in the case of a dry-run
    if dry_run:
        sys.exit(2)
    else:
        return
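# Hypothetical invocation sketch for the forcemeta-aware promote() above,
# using invented IDs. The final 'True' forces yum metadata regeneration in
# the target environment.
#
#   env_list   = {'Library': 1, 'Development': 2}
#   prior_list = {2: 1}
#   promote('Development', {12: 345}, {12: 'cv-rhel7'}, {12: '5.0'},
#       env_list, prior_list, False, False, True)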