Example #1
def cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall, ignorefirstpromoted):
    """Clean Content Views"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Cleanup content views"

    # Now we have all the info needed, we can actually trigger the cleanup.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching cleanup criteria
    if not ver_list:
        msg = "No content view versions found matching cleanup criteria"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    for cvid in sorted(ver_list.keys(),reverse=True):
        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])

        msg = "Cleaning content view '" + str(ver_descr[cvid]) + "'" 
        helpers.log_msg(msg, 'INFO')
        print helpers.HEADER + msg + helpers.ENDC

        # For the given content view we need to find the orphaned versions
        cvinfo = get_content_view_info(cvid)

        # Find the oldest published version
        version_list = []
        version_list_all = []
        for version in cvinfo['versions']:
            if not version['environment_ids']:
                version_list_all.append(float(version['version']))
                continue
            else:
                msg = "Found version " + str(version['version'])
                helpers.log_msg(msg, 'DEBUG')
                # Add the version id to a list
                version_list.append(float(version['version']))

        # Find the oldest 'in use' version id
        if not version_list:
            msg = "No oldest in-use version found"
        else:
            lastver = min(version_list)
            msg = "Oldest in-use version is " + str(lastver)
        helpers.log_msg(msg, 'DEBUG')

        # Find the oldest 'NOT in use' version id
        if not version_list_all:
            msg = "No oldest NOT-in-use version found"
        else:
            msg = "Oldest NOT-in-use version is " + str(min(version_list_all))
        helpers.log_msg(msg, 'DEBUG')

        # Find versions to delete (based on keep parameter) when --ignorefirstpromoted is used
        version_list_all.sort()
        todelete = version_list_all[:(len(version_list_all) - int(ver_keep[cvid]))]
        msg = "Versions to remove if --ignorefirstpromoted: " + str(todelete)
        helpers.log_msg(msg, 'DEBUG')
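        # Worked example (hypothetical values): if the not-in-use versions are
        # [1.0, 2.0, 3.0, 4.0] and ver_keep[cvid] is 2, the slice above keeps the
        # newest two and marks [1.0, 2.0] for removal.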

        for version in cvinfo['versions']:
            # Get composite content views for version
            cvv = get_content_view_version(version['id'])
            # Find versions that are not in any environment and not in any composite content view
            if not version['environment_ids'] and not cvv['composite_content_view_ids']:
                if not locked:
                    msg = "Orphan view version " + str(version['version']) + " found in '" +\
                        str(ver_descr[cvid]) + "'"
                    helpers.log_msg(msg, 'DEBUG')

                    if ignorefirstpromoted:
                        if cleanall:
                            msg = "Removing version " + str(version['version'])
                            helpers.log_msg(msg, 'INFO')
                            print helpers.HEADER + msg + helpers.ENDC
                        else:
                            if float(version['version']) in todelete:
                                # If ignorefirstpromoted delete CV
                                msg = "Removing version " + str(version['version'])
                                helpers.log_msg(msg, 'INFO')
                                print helpers.HEADER + msg + helpers.ENDC
                            else:
                                msg = "Skipping delete of version " + str(version['version']) + " due to --keep value"
                                helpers.log_msg(msg, 'INFO')
                                print msg
                                continue
                    else:
                        if float(version['version']) > float(lastver):
                            # If we have chosen to remove all orphans
                            if cleanall:
                                msg = "Removing version " + str(version['version'])
                                helpers.log_msg(msg, 'INFO')
                                print helpers.HEADER + msg + helpers.ENDC
                            else:
                                msg = "Skipping delete of version " + str(version['version'])
                                helpers.log_msg(msg, 'INFO')
                                print msg
                                continue
                        else:
                            if float(version['version']) < (lastver - float(ver_keep[cvid])):
                                msg = "Removing version " + str(version['version'])
                                helpers.log_msg(msg, 'INFO')
                                print helpers.HEADER + msg + helpers.ENDC
                            else:
                                msg = "Skipping delete of version " + str(version['version']) + " due to --keep value"
                                helpers.log_msg(msg, 'INFO')
                                print msg
                                continue

                # Delete the view version from the content view
                if not dry_run and not locked:
                    try:
                        task = helpers.put_json(
                            helpers.KATELLO_API + "content_views/" + str(cvid) + "/remove/",
                            json.dumps(
                                {
                                    "id": cvid,
                                    "content_view_version_ids": version['id']
                                }
                            )
                        )

                        if 'id' in task:
                            task_id = task['id']

                            # Wait for the task to complete
                            helpers.wait_for_task(task_id,'clean')
    
                            # Check if the deletion completed successfully
                            tinfo = helpers.get_task_status(task_id)
                            if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                                msg = "Removal of content view version OK"
                                helpers.log_msg(msg, 'INFO')
                                print helpers.GREEN + "OK" + helpers.ENDC
                            else:
                                msg = "Failed"
                                helpers.log_msg(msg, 'ERROR')
                        else:
                            msg = "Can't remove content view " + str(cvid)
                            helpers.log_msg(msg, 'INFO')
                            print helpers.HEADER + msg + helpers.ENDC

                    except Warning:
                        msg = "Failed to initiate removal"
                        helpers.log_msg(msg, 'WARNING')

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing removal"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)
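
The removal in the example above follows a common Katello pattern: PUT to content_views/<id>/remove/, wait for the returned task, then check its result. A minimal sketch of that pattern factored into its own function is shown below; the function name remove_cv_version and its boolean return value are illustrative only (not part of the original script), and it reuses the helpers wrappers and json import from the listing above.

def remove_cv_version(cvid, version_id):
    """Hypothetical helper: remove one content view version and wait for the task."""
    # Trigger the removal via the Katello API (same call as in the example above)
    task = helpers.put_json(
        helpers.KATELLO_API + "content_views/" + str(cvid) + "/remove/",
        json.dumps({"id": cvid, "content_view_version_ids": version_id}))

    if 'id' not in task:
        # No task id means the API refused the request
        return False

    # Block until the asynchronous task completes
    helpers.wait_for_task(task['id'], 'clean')

    # Report success only if the task finished cleanly
    tinfo = helpers.get_task_status(task['id'])
    return tinfo['state'] != 'running' and tinfo['result'] == 'success'
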
Example #2
def cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall,
            ignorefirstpromoted):
    """Clean Content Views"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Cleanup content views"

    # Now we have all the info needed, we can actually trigger the cleanup.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching cleanup criteria
    if not ver_list:
        msg = "No content view versions found matching cleanup criteria"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    for cvid in ver_list.keys():
        msg = "Cleaning content view '" + str(ver_descr[cvid]) + "'"
        helpers.log_msg(msg, 'INFO')
        print helpers.HEADER + msg + helpers.ENDC

        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])
        if locked:
            continue

        # For the given content view we need to find the orphaned versions
        cvinfo = get_content_view_info(cvid)

        # Find the oldest published version
        version_list = []
        orphan_versions = []
        orphan_dict = {}
        all_versions = []
        ccv_versions = []
        for version in cvinfo['versions']:

            # Check if the version is part of a published view.
            # This is not returned in cvinfo, and we need to see if we are part of a CCV
            version_in_use, version_in_ccv = check_version_views(version['id'])

            # Build a list of ALL version numbers
            all_versions.append(float(version['version']))
            # Add any version numbers that are part of a CCV to a list
            if version_in_ccv:
                ccv_versions.append(float(version['version']))
            if not version['environment_ids']:
                # These are the versions that don't belong to an environment (i.e. orphans)
                # We also cross-check for versions that may be in a CCV here.
                # We add the version name and id into a dictionary so we can delete by id.
                if not version_in_use:
                    orphan_versions.append(float(version['version']))
                    orphan_dict[version['version']] = version['id']
                    continue
            else:
                msg = "Found version " + str(version['version'])
                helpers.log_msg(msg, 'DEBUG')
                # Add the version id to a list
                version_list.append(float(version['version']))

        # Find the oldest 'in use' version id
        if not version_list:
            msg = "No oldest in-use version found"
        else:
            lastver = min(version_list)
            msg = "Oldest in-use version is " + str(lastver)
        helpers.log_msg(msg, 'DEBUG')

        # Find the oldest 'NOT in use' version id
        if not orphan_versions:
            msg = "No oldest NOT-in-use version found"
        else:
            msg = "Oldest NOT-in-use version is " + str(min(orphan_versions))
        helpers.log_msg(msg, 'DEBUG')

        # Find the element position in the all_versions list of the oldest in-use version
        # e.g. vers 102.0 is oldest in-use and is element [5] in the all_versions list
        list_position = [i for i, x in enumerate(all_versions) if x == lastver]
        # Remove the number of views to keep from the element position of the oldest in-use
        # e.g. keep=2 results in an adjusted list element position [3]
        num_to_delete = list_position[0] - int(ver_keep[cvid])
        # Delete from position [0] to the first 'keep' position
        # e.g. first keep element is [3] so list of elements [0, 1, 2] is created
        list_pos_to_delete = [i for i in range(num_to_delete)]
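        # Worked example (hypothetical values): all_versions = [100.0, 101.0, 102.0,
        # 103.0, 104.0, 105.0] with 102.0 the oldest in-use version gives
        # list_position = [2]; with keep=1, num_to_delete = 1 and
        # list_pos_to_delete = [0], i.e. only the oldest orphan is removed.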

        # Find versions to delete (based on keep parameter)
        # Make sure the version list is in order
        orphan_versions.sort()

        if cleanall:
            # Remove all orphaned versions
            todelete = orphan_versions
        elif ignorefirstpromoted:
            # All but the newest 'keep' orphans are queued for deletion (from PR #26)
            todelete = orphan_versions[:(len(orphan_versions) -
                                         int(ver_keep[cvid]))]
        else:
            todelete = []
            # Look up the orphan versions at the element positions marked for deletion
            for i in sorted(list_pos_to_delete, reverse=True):
                todelete.append(orphan_versions[i])

        msg = "Versions to remove: " + str(todelete)
        helpers.log_msg(msg, 'DEBUG')

        for version in all_versions:
            if not locked:
                if version in todelete:
                    msg = "Orphan view version " + str(version) + " found in '" +\
                        str(ver_descr[cvid]) + "'"
                    helpers.log_msg(msg, 'DEBUG')

                    # Lookup the version_id from our orphan_dict
                    delete_id = orphan_dict.get(str(version))

                    msg = "Removing version " + str(version)
                    helpers.log_msg(msg, 'INFO')
                    print helpers.HEADER + msg + helpers.ENDC
                else:
                    if version in ccv_versions:
                        msg = "Skipping delete of version " + str(
                            version) + " (member of a CCV)"
                    elif version in orphan_versions:
                        msg = "Skipping delete of version " + str(
                            version) + " (due to keep value)"
                    else:
                        msg = "Skipping delete of version " + str(
                            version) + " (in use)"
                    helpers.log_msg(msg, 'INFO')
                    print msg
                    continue
            else:
                msg = "Version " + str(version) + " is locked"
                helpers.log_msg(msg, 'WARNING')
                continue

            # Delete the view version from the content view
            if not dry_run and not locked:
                try:
                    task_id = helpers.put_json(
                        helpers.KATELLO_API + "content_views/" + str(cvid) +
                        "/remove/",
                        json.dumps({
                            "id": cvid,
                            "content_view_version_ids": delete_id
                        }))['id']

                    # Wait for the task to complete
                    helpers.wait_for_task(task_id, 'clean')

                    # Check if the deletion completed successfully
                    tinfo = helpers.get_task_status(task_id)
                    if tinfo['state'] != 'running' and tinfo[
                            'result'] == 'success':
                        msg = "Removal of content view version OK"
                        helpers.log_msg(msg, 'INFO')
                        print helpers.GREEN + "OK" + helpers.ENDC
                    else:
                        msg = "Failed"
                        helpers.log_msg(msg, 'ERROR')

                except Warning:
                    msg = "Failed to initiate removal"
                    helpers.log_msg(msg, 'WARNING')

                except KeyError:
                    msg = "Failed to initiate removal (KeyError)"
                    helpers.log_msg(msg, 'WARNING')

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing removal"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)
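
The ver_list, ver_descr and ver_keep arguments are dictionaries keyed by content view id, as the lookups ver_descr[cvid] and ver_keep[cvid] above imply. A purely illustrative invocation follows; the ids, names and keep counts are made up, and the exact value stored in ver_list comes from the caller, which is not shown in these listings.

ver_list = {12: 12, 15: 15}                       # content view ids selected for cleanup
ver_descr = {12: 'cv-rhel7-os', 15: 'cv-epel7'}   # human-readable content view names
ver_keep = {12: 3, 15: 1}                         # orphan versions to keep per content view

cleanup(ver_list, ver_descr, dry_run=True, runuser='svc-satellite',
        ver_keep=ver_keep, cleanall=False, ignorefirstpromoted=False)
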
Example #3
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-e',
                        '--env',
                        help='Environment config file',
                        required=False)
    group.add_argument('-a',
                       '--all',
                       help='Export ALL content',
                       required=False,
                       action="store_true")
    group.add_argument('-i',
                       '--incr',
                       help='Incremental Export of content since last run',
                       required=False,
                       action="store_true")
    group.add_argument('-s',
                       '--since',
                       help='Export content since YYYY-MM-DD HH:MM:SS',
                       required=False,
                       type=helpers.valid_date)
    parser.add_argument('-l',
                        '--last',
                        help='Display time of last export',
                        required=False,
                        action="store_true")
    parser.add_argument('-n',
                        '--nogpg',
                        help='Skip GPG checking',
                        required=False,
                        action="store_true")
    parser.add_argument('-r',
                        '--repodata',
                        help='Include repodata for repos with no new packages',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    if args.env:
        repocfg = os.path.join(dir, 'config/' + args.env + '.yml')
        if not os.path.exists(repocfg):
            print "ERROR: Config file " + repocfg + " not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open(repocfg, 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(),
                                            '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        repo = "{:<70}".format(time)
                        print repo[:70] + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
            + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        cola = "Exporting DoV"
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            colb = "(INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
            colb = "(FULL)"
        msg = cola + " " + colb
        helpers.log_msg(msg, 'INFO')
        output = "{:<70}".format(cola)
        print output[:70] + ' ' + colb

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in Satellite
        for repo in erepos:
            repo_in_sat = False
            for repo_x in repolist['results']:
                if re.findall("\\b" + repo + "\\b$", repo_x['label']):
                    repo_in_sat = True
                    break
            if not repo_in_sat:
                msg = "'" + repo + "' not found in Satellite"
                helpers.log_msg(msg, 'WARNING')

        # Process each repo
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result[
                            'label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'],
                                                       ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export,
                                                export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo[
                                'result'] == 'success':
                            # Count the number of exported packages
                            # First resolve the product label - this forms part of the export path
                            product = get_product(
                                org_id, repo_result['product']['cp_id'])
                            # Now we can build the export path itself
                            basepath = helpers.EXPORTDIR + "/" + org_name + "-" + product + "-" + repo_result[
                                'label']
                            if export_type == 'incr':
                                basepath = basepath + "-incremental"
                            exportpath = basepath + "/" + repo_result[
                                'relative_path']
                            msg = "\nExport path = " + exportpath
                            helpers.log_msg(msg, 'DEBUG')

                            os.chdir(exportpath)
                            numrpms = len([
                                f for f in os.walk(".").next()[2]
                                if f[-4:] == ".rpm"
                            ])

                            msg = "Repository Export OK (" + str(
                                numrpms) + " new packages)"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            if numrpms != 0 or args.repodata:
                                msg = "Adding " + repo_result[
                                    'label'] + " to export list"
                                helpers.log_msg(msg, 'DEBUG')
                                exported_repos.append(repo_result['label'])
                            else:
                                msg = "Not including repodata for empty repo " + repo_result[
                                    'label']
                                helpers.log_msg(msg, 'DEBUG')

                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            # Handle FILE type exports (ISO repos)
            elif repo_result['content_type'] == 'file':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result[
                            'label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'],
                                                       ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_iso(repo_result['id'],
                                              repo_result['label'],
                                              repo_result['relative_path'],
                                              last_export, export_type)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time

                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result[
                                'label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result[
                                'label']
                            helpers.log_msg(msg, 'DEBUG')

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    if not args.nogpg:
        do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open(vardir + '/exports_' + ename + '.pkl',
                                   "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
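Example #4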
def cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall, ignorefirstpromoted):
    """Clean Content Views"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Cleanup content views"

    # Now we have all the info needed, we can actually trigger the cleanup.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching cleanup criteria
    if not ver_list:
        msg = "No content view versions found matching cleanup criteria"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    for cvid in ver_list.keys():
        msg = "Cleaning content view '" + str(ver_descr[cvid]) + "'"
        helpers.log_msg(msg, 'INFO')
        print helpers.HEADER + msg + helpers.ENDC

        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])
        if locked:
            continue

        # For the given content view we need to find the orphaned versions
        cvinfo = get_content_view_info(cvid)

        # Find the oldest published version
        version_list = []
        orphan_versions = []
        orphan_dict = {}
        all_versions = []
        ccv_versions = []
        for version in cvinfo['versions']:

            # Check if the version is part of a published view.
            # This is not returned in cvinfo, and we need to see if we are part of a CCV
            version_in_use, version_in_ccv = check_version_views(version['id'])

            # Build a list of ALL version numbers
            all_versions.append(float(version['version']))
            # Add any version numbers that are part of a CCV to a list
            if version_in_ccv:
                ccv_versions.append(float(version['version']))
            if not version['environment_ids']:
                # These are the versions that don't belong to an environment (i.e. orphans)
                # We also cross-check for versions that may be in a CCV here.
                # We add the version name and id into a dictionary so we can delete by id.
                if not version_in_use:
                    orphan_versions.append(float(version['version']))
                    orphan_dict[version['version']] = version['id']
                    continue
            else:
                msg = "Found version " + str(version['version'])
                helpers.log_msg(msg, 'DEBUG')
                # Add the version id to a list
                version_list.append(float(version['version']))

        # Find the oldest 'in use' version id
        if not version_list:
            msg = "No oldest in-use version found"
        else:
            lastver = min(version_list)
            msg = "Oldest in-use version is " + str(lastver)
        helpers.log_msg(msg, 'DEBUG')

        # Find the oldest 'NOT in use' version id
        if not orphan_versions:
            msg = "No oldest NOT-in-use version found"
        else:
            msg = "Oldest NOT-in-use version is " + str(min(orphan_versions))
        helpers.log_msg(msg, 'DEBUG')

        # Find the element position in the all_versions list of the oldest in-use version
        # e.g. vers 102.0 is oldest in-use and is element [5] in the all_versions list
        list_position = [i for i,x in enumerate(all_versions) if x == lastver]
        # Remove the number of views to keep from the element position of the oldest in-use
        # e.g. keep=2 results in an adjusted list element position [3]
        num_to_delete = list_position[0] - int(ver_keep[cvid])
        # Delete from position [0] to the first 'keep' position
        # e.g. first keep element is [3] so list of elements [0, 1, 2] is created
        list_pos_to_delete = [i for i in range(num_to_delete)]

        # Find versions to delete (based on keep parameter)
        # Make sure the version list is in order
        orphan_versions.sort()

        if cleanall:
            # Remove all orphaned versions
            todelete = orphan_versions
        elif ignorefirstpromoted:
            # All but the newest 'keep' orphans are queued for deletion (from PR #26)
            todelete = orphan_versions[:(len(orphan_versions) - int(ver_keep[cvid]))]
        else:
            todelete = []
            # Look up the orphan versions at the element positions marked for deletion
            for i in sorted(list_pos_to_delete, reverse=True):
                todelete.append(orphan_versions[i])

        msg = "Versions to remove: " + str(todelete)
        helpers.log_msg(msg, 'DEBUG')

        for version in all_versions:
            if not locked:
                if version in todelete:
                    msg = "Orphan view version " + str(version) + " found in '" +\
                        str(ver_descr[cvid]) + "'"
                    helpers.log_msg(msg, 'DEBUG')

                    # Lookup the version_id from our orphan_dict
                    delete_id = orphan_dict.get(str(version))

                    msg = "Removing version " + str(version)
                    helpers.log_msg(msg, 'INFO')
                    print helpers.HEADER + msg + helpers.ENDC
                else:
                    if version in ccv_versions:
                        msg = "Skipping delete of version " + str(version) + " (member of a CCV)"
                    elif version in orphan_versions:
                        msg = "Skipping delete of version " + str(version) + " (due to keep value)"
                    else:
                        msg = "Skipping delete of version " + str(version) + " (in use)"
                    helpers.log_msg(msg, 'INFO')
                    print msg
                    continue
            else:
                msg = "Version " + str(version) + " is locked"
                helpers.log_msg(msg, 'WARNING')
                continue

            # Delete the view version from the content view
            if not dry_run and not locked:
                try:
                    task_id = helpers.put_json(
                        helpers.KATELLO_API + "content_views/" + str(cvid) + "/remove/",
                        json.dumps(
                            {
                                "id": cvid,
                                "content_view_version_ids": delete_id
                            }
                            ))['id']

                    # Wait for the task to complete
                    helpers.wait_for_task(task_id,'clean')

                    # Check if the deletion completed successfully
                    tinfo = helpers.get_task_status(task_id)
                    if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                        msg = "Removal of content view version OK"
                        helpers.log_msg(msg, 'INFO')
                        print helpers.GREEN + "OK" + helpers.ENDC
                    else:
                        msg = "Failed"
                        helpers.log_msg(msg, 'ERROR')

                except Warning:
                    msg = "Failed to initiate removal"
                    helpers.log_msg(msg, 'WARNING')

                except KeyError:
                    msg = "Failed to initiate removal (KeyError)"
                    helpers.log_msg(msg, 'WARNING')

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing removal"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)
Example #5
def sync_content(org_id, imported_repos):
    """
    Synchronize the repositories
    Triggers a sync of all repositories belonging to the configured sync plan
    """
    repos_to_sync = []
    delete_override = False

    # Get a listing of repositories in this Satellite
    enabled_repos = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
            json.dumps(
                {
                    "organization_id": org_id,
                    "per_page": '1000',
                }
            ))

    # Loop through each repo to be imported/synced
    for repo in imported_repos:
        do_import = False
        for repo_result in enabled_repos['results']:
            if repo in repo_result['label']:
                # Ensure we have an exact match on the repo label
                if repo == repo_result['label']:
                    do_import = True
                    repos_to_sync.append(repo_result['id'])

                    # Ensure Mirror-on-sync flag is set to FALSE to make sure incremental
                    # import does not (cannot) delete existing packages.
                    msg = "Setting mirror-on-sync=false for repo id " + str(
                        repo_result['id'])
                    helpers.log_msg(msg, 'DEBUG')
                    helpers.put_json(
                        helpers.KATELLO_API + "/repositories/" + str(repo_result['id']), \
                            json.dumps(
                                {
                                    "mirror_on_sync": False
                                }
                            ))

        if do_import:
            msg = "Repo " + repo + " found in Satellite"
            helpers.log_msg(msg, 'DEBUG')
        else:
            msg = "Repo " + repo + " is not enabled in Satellite"
            # If the repo is not enabled, don't delete the input files.
            # This gives the admin a chance to manually enable the repo and re-import
            delete_override = True
            helpers.log_msg(msg, 'WARNING')
            # TODO: We could go on here and try to enable the Red Hat repo .....

    # If we get to here and nothing was added to repos_to_sync we will abort the import.
    # This will probably occur on the initial import - nothing will be enabled in Satellite.
    # Also if there are no updates during incremental sync.
    if not repos_to_sync:
        msg = "No updates in imported content - skipping sync"
        helpers.log_msg(msg, 'WARNING')
        return
    else:
        msg = "Repo ids to sync: " + str(repos_to_sync)
        helpers.log_msg(msg, 'DEBUG')

        msg = "Syncing repositories"
        helpers.log_msg(msg, 'INFO')
        print msg

        # Break repos_to_sync into groups of n
        repochunks = [
            repos_to_sync[i:i + helpers.SYNCBATCH]
            for i in range(0, len(repos_to_sync), helpers.SYNCBATCH)
        ]
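        # e.g. (hypothetical) 7 repo ids with helpers.SYNCBATCH = 3 produce three
        # batches of sizes 3, 3 and 1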

        # Loop through the smaller batches of repos and sync them
        for chunk in repochunks:
            chunksize = len(chunk)
            msg = "Syncing repo batch " + str(chunk)
            helpers.log_msg(msg, 'DEBUG')
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/bulk/sync", \
                    json.dumps(
                        {
                            "ids": chunk,
                        }
                    ))["id"]
            msg = "Repo sync task id = " + task_id
            helpers.log_msg(msg, 'DEBUG')

            # Now we need to wait for the sync to complete
            helpers.wait_for_task(task_id, 'sync')

            tinfo = helpers.get_task_status(task_id)
            if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                msg = "Batch of " + str(chunksize) + " repos complete"
                helpers.log_msg(msg, 'INFO')
                print helpers.GREEN + msg + helpers.ENDC
            else:
                msg = "Batch sync has errors"
                helpers.log_msg(msg, 'WARNING')

        return delete_override
Example #6
def sync_content(org_id, imported_repos):
    """
    Synchronize the repositories
    Triggers a sync of all repositories belonging to the configured sync plan
    """
    repos_to_sync = []
    delete_override = False

    # Get a listing of repositories in this Satellite
    enabled_repos = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
            json.dumps(
                {
                    "organization_id": org_id,
                    "per_page": '1000',
                }
            ))

    # Loop through each repo to be imported/synced
    for repo in imported_repos:
        do_import = False
        for repo_result in enabled_repos['results']:
            if repo in repo_result['label']:
                do_import = True
                repos_to_sync.append(repo_result['id'])

                # Ensure Mirror-on-sync flag is set to FALSE to make sure incremental
                # import does not (cannot) delete existing packages.
                msg = "Setting mirror-on-sync=false for repo id " + str(repo_result['id'])
                helpers.log_msg(msg, 'DEBUG')
                helpers.put_json(
                    helpers.KATELLO_API + "/repositories/" + str(repo_result['id']), \
                        json.dumps(
                            {
                                "mirror_on_sync": False
                            }
                        ))

        if do_import:
            msg = "Repo " + repo + " found in Satellite"
            helpers.log_msg(msg, 'DEBUG')
        else:
            msg = "Repo " + repo + " is not enabled in Satellite"
            # If the repo is not enabled, don't delete the input files.
            # This gives the admin a chance to manually enable the repo and re-import
            delete_override = True
            helpers.log_msg(msg, 'WARNING')
            # TODO: We could go on here and try to enable the Red Hat repo .....

    # If we get to here and nothing was added to repos_to_sync we will abort the import.
    # This will probably occur on the initial import - nothing will be enabled in Satellite.
    # Also if there are no updates during incremental sync.
    if not repos_to_sync:
        msg = "No updates in imported content - skipping sync"
        helpers.log_msg(msg, 'WARNING')
        return
    else:
        msg = "Repo ids to sync: " + str(repos_to_sync)
        helpers.log_msg(msg, 'DEBUG')

        msg = "Syncing repositories"
        helpers.log_msg(msg, 'INFO')
        print msg

        # Break repos_to_sync into groups of n 
        repochunks = [ repos_to_sync[i:i+helpers.SYNCBATCH] for i in range(0, len(repos_to_sync), helpers.SYNCBATCH) ]

        # Loop through the smaller batches of repos and sync them
        for chunk in repochunks:
            chunksize = len(chunk)
            msg = "Syncing repo batch " + str(chunk)
            helpers.log_msg(msg, 'DEBUG')
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/bulk/sync", \
                    json.dumps(
                        {
                            "ids": chunk,
                        }
                    ))["id"]
            msg = "Repo sync task id = " + task_id
            helpers.log_msg(msg, 'DEBUG')

            # Now we need to wait for the sync to complete
            helpers.wait_for_task(task_id, 'sync')

            tinfo = helpers.get_task_status(task_id)
            if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                msg = "Batch of " + str(chunksize) + " repos complete"
                helpers.log_msg(msg, 'INFO')
                print helpers.GREEN + msg + helpers.ENDC
            else:
                msg = "Batch sync has errors"
                helpers.log_msg(msg, 'WARNING')

        return delete_override
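Example #7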
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    parser.add_argument('-e', '--env', help='Environment config file', required=False)
    group.add_argument('-a', '--all', help='Export ALL content', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of content since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export content since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    if args.env:
        if not os.path.exists('config/' + args.env + '.yml'):
            print "ERROR: Config file 'config/" + args.env + ".yml' not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open("config/" + args.env + ".yml", 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        print str(time) + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # TODO: Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
            + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # TODO: Remove any previous exported content
#    os.chdir(helpers.EXPORTDIR)
#    shutil.rmtree()


    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            msg = "Exporting DoV (INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
            msg = "Exporting DoV (FULL)"
        helpers.log_msg(msg, 'INFO')
        print msg

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in our DoV
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        msg = "Exporting " + repo_result['label'] \
                            + " (INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        msg = "Exporting " + repo_result['label'] + "(FULL)"
                    helpers.log_msg(msg, 'INFO')
                    print msg

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export, export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                            msg = "Repository Export OK"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')


                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')


    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open('var/exports_' + ename + '.pkl', "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
Example #8
0
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-e', '--env', help='Environment config file', required=False)
    group.add_argument('-a', '--all', help='Export ALL content', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of content since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export content since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    parser.add_argument('-n', '--nogpg', help='Skip GPG checking', required=False,
        action="store_true")
    parser.add_argument('-r', '--repodata', help='Include repodata for repos with no new packages', 
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    if args.env:
        repocfg = os.path.join(dir, 'config/' + args.env + '.yml')
        if not os.path.exists(repocfg):
            print "ERROR: Config file " + repocfg + " not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open(repocfg, 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'
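    # export_times maps each repo label (or 'DoV') to the timestamp of its last
    # successful export; export_type starts as incremental and is forced to
    # 'full' below when --all is given or no previous export is recorded.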

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        repo = "{:<70}".format(time)
                        print repo[:70] + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
                + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))
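    # The response is a dict whose 'results' list holds one entry per enabled
    # repository; per_page is set to 1000 on the assumption that this covers
    # every repository in the organisation in a single page.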

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        cola = "Exporting DoV"
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            colb = "(INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
            colb = "(FULL)"
        msg = cola + " " + colb
        helpers.log_msg(msg, 'INFO')
        output = "{:<70}".format(cola)
        print output[:70] + ' ' + colb

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in Satellite
        for repo in erepos:
            repo_in_sat = False
            for repo_x in repolist['results']:
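                # Match when the configured repo name appears as a whole word at
                # the end of the Satellite label (the name is not regex-escaped,
                # so it is assumed to contain no regex metacharacters)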
                if re.findall("\\b" + repo + "\\b$", repo_x['label']):
                    repo_in_sat = True
                    break
            if not repo_in_sat:
                msg = "'" + repo + "' not found in Satellite"
                helpers.log_msg(msg, 'WARNING')

        # Process each repo
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export, export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                            # Count the number of exported packages
                            # First resolve the product label - this forms part of the export path
                            product = get_product(org_id, repo_result['product']['cp_id'])
                            # Now we can build the export path itself
                            basepath = helpers.EXPORTDIR + "/" + org_name + "-" + product + "-" + repo_result['label']
                            if export_type == 'incr':
                                basepath = basepath + "-incremental"
                            exportpath = basepath + "/" + repo_result['relative_path']
                            msg = "\nExport path = " + exportpath
                            helpers.log_msg(msg, 'DEBUG')

                            os.chdir(exportpath)
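                            # os.walk(".").next()[2] lists only the files directly
                            # under exportpath, so RPMs in subdirectories are not
                            # included in this count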
                            numrpms = len([f for f in os.walk(".").next()[2] if f.endswith(".rpm")])

                            msg = "Repository Export OK (" + str(numrpms) + " new packages)"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            if numrpms != 0 or args.repodata:
                                msg = "Adding " + repo_result['label'] + " to export list"
                                helpers.log_msg(msg, 'DEBUG')
                                exported_repos.append(repo_result['label'])
                            else:
                                msg = "Not including repodata for empty repo " + repo_result['label']
                                helpers.log_msg(msg, 'DEBUG')

                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            # Handle FILE type exports (ISO repos)
            elif repo_result['content_type'] == 'file':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_iso(repo_result['id'], repo_result['label'], repo_result['relative_path'], last_export, export_type)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time
                        
                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result['label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result['label']
                            helpers.log_msg(msg, 'DEBUG')

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')



    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))
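    # (The import script below loads this same pickle back in as 'imported_repos'
    # to decide which repositories to sync.)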

    # Run GPG Checks on the exported RPMs
    if not args.nogpg:
        do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open(vardir + '/exports_' + ename + '.pkl', "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
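
# --- Hypothetical sketch (not part of the original scripts) -----------------
# check_disk_space(), called from the export main() above, is defined elsewhere
# in these scripts.  As a rough illustration, a check of this kind could compare
# the free space under /var/lib/pulp (the path referenced in the comment above)
# against a threshold before a full export.  The function name, path and
# threshold below are assumptions for illustration only; it reuses the module's
# existing os and helpers imports.
def check_disk_space_sketch(export_type, pulp_path='/var/lib/pulp', min_free_gb=50):
    stat = os.statvfs(pulp_path)
    free_gb = (stat.f_bavail * stat.f_frsize) / (1024 ** 3)
    if export_type == 'full' and free_gb < min_free_gb:
        msg = "Only " + str(free_gb) + "GB free in " + pulp_path + " for a full export"
        helpers.log_msg(msg, 'WARNING')
    return free_gb
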
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    parser.add_argument('-d', '--date', \
        help='Date/name of Import fileset to process (YYYY-MM-DD_NAME)', required=True)
    parser.add_argument('-n', '--nosync', help='Do not trigger a sync after extracting content',
        required=False, action="store_true")
    parser.add_argument('-r', '--remove', help='Remove input files after import has completed',
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    expdate = args.date

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(expdate)

    # Cleanup from any previous imports
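    # Note: os.system() runs this command via /bin/sh; the {content,custom,...}
    # brace expansion is a bash feature, so this assumes /bin/sh is bash (as on
    # RHEL) rather than a strictly POSIX shell.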
    os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Trigger a sync of the content into the Library
    if args.nosync:
        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = False
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
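        # (This load is relative to the current working directory, so it assumes
        # extract_content() has left exported_repos.pkl there.)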
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (task_id, delete_override) = sync_content(org_id, imported_repos)

        # Now we need to wait for the sync to complete
        helpers.wait_for_task(task_id, 'sync')

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

    if args.remove and not delete_override:
        msg = "Removing " + helpers.IMPORTDIR + "/sat6_export_" + expdate + "* input files"
        helpers.log_msg(msg, 'DEBUG')
#        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + expdate + "*")

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')
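
# --- Hypothetical sketch (not part of the original scripts) -----------------
# The export main() above reads per-repo "last export" timestamps via
# read_pickle(ename) and writes them back with pickle.dump() after a successful
# run.  A helper of that shape could look like the sketch below; the file name
# layout mirrors the dump call in main(), but the function name and behaviour
# are assumptions for illustration only (it reuses the module's os and pickle
# imports).
def read_export_times_sketch(name, vardir='var'):
    pkl = os.path.join(vardir, 'exports_' + name + '.pkl')
    if not os.path.exists(pkl):
        # First run: no previous export recorded for this repo group
        return {}
    return pickle.load(open(pkl, 'rb'))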