Example #1
def do_gpg_check(export_dir):
    """
    Find and GPG Check all RPM files
    """
    msg = "Checking GPG integrity of RPMs in " + export_dir
    helpers.log_msg(msg, 'INFO')
    print msg

    badrpms = []
    os.chdir(export_dir)
    for rpm in locate("*.rpm"):
        return_code = subprocess.call("rpm -K " + rpm, shell=True, stdout=open(os.devnull, 'wb'))

        # A non-zero return code indicates a GPG check failure.
        if return_code != 0:
            # For display purposes, strip the first 6 directory elements
            rpmnew = os.path.join(*(rpm.split(os.path.sep)[6:]))
            badrpms.append(rpmnew)

    # If we have any bad ones we need to fail the export.
    if len(badrpms) != 0:
        msg = "The following RPM's failed the GPG check.."
        helpers.log_msg(msg, 'ERROR')
        for badone in badrpms:
            msg = badone
            helpers.log_msg(msg, 'ERROR')
        msg = "------ Export Aborted ------"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)
    else:
        msg = "GPG check completed successfully"
        helpers.log_msg(msg, 'INFO')
        print helpers.GREEN + "GPG Check - Pass" + helpers.ENDC
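The do_gpg_check() above relies on a locate() helper that is not shown in these examples. A minimal sketch of what such a generator might look like, assuming it is a plain recursive fnmatch-based file search:

import fnmatch
import os

def locate(pattern, root=os.curdir):
    """Recursively yield paths under 'root' whose file name matches 'pattern'."""
    for path, dirs, files in os.walk(os.path.abspath(root)):
        for filename in fnmatch.filter(files, pattern):
            yield os.path.join(path, filename)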
Example #2
def check_missing(imports, exports, dataset, fixhistory, vardir):
    """Find any datasets that have not been imported.

    Compare export history with import history to identify missed datasets.

    If fixhistory is passed in, saves previous imports in imports.pkl and exits.
    """
    missing = False

    if fixhistory:
        # Remove the last element (this import) before saving - we haven't imported yet!
        exports = exports[:-1]
        pickle.dump(exports, open(vardir + '/imports.pkl', "wb"))

        # Copy the current 'exporthistory' over the 'importhistory' to 'fix' current mismatches
        msg = "Saved export history as import history. Please re-run this import."
        helpers.log_msg(msg, 'INFO')
        print msg
        sys.exit(2)
    else:
        for ds in exports:
            if ds not in imports:
                if dataset not in ds:
                    msg = "Import dataset " + ds + " has not been imported"
                    helpers.log_msg(msg, 'WARNING')
                    missing = True

    return (missing)
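A hypothetical call of check_missing() on the import side. The pickle file names, the vardir path and the 'DEV' dataset name are illustrative assumptions, not taken from these examples:

import pickle

vardir = '/var/sat6'  # assumed location of the history pickles (hypothetical)
exports = pickle.load(open(vardir + '/exporthistory_DEV.pkl', 'rb'))  # hypothetical file name
imports = pickle.load(open(vardir + '/imports.pkl', 'rb'))

if check_missing(imports, exports, 'DEV', False, vardir):
    print "One or more export datasets have not been imported"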
Example #3
def prep_export_tree(org_name):
    """
    Combine individual export directories into a single export tree.
    The export top level contains /content and /custom directories, with
    'listing' files throughout the tree.
    """
    msg = "Preparing export directory tree..."
    helpers.log_msg(msg, 'INFO')
    print msg
    devnull = open(os.devnull, 'wb')
    os.makedirs(helpers.EXPORTDIR + "/export")
    # Haven't found a nice python way to do this - yet...
    subprocess.call("cp -rp " + helpers.EXPORTDIR + "/" + org_name + "*/" + org_name + \
        "/Library/* " + helpers.EXPORTDIR + "/export", shell=True, stdout=devnull, stderr=devnull)
    # Remove original directories
    os.system("rm -rf " + helpers.EXPORTDIR + "/" + org_name + "*/")

    # We need to re-generate the 'listing' files as we will have overwritten some during the merge
    msg = "Rebuilding listing files..."
    helpers.log_msg(msg, 'INFO')
    print msg
    create_listing_file(helpers.EXPORTDIR + "/export")

    # pylint: disable=unused-variable
    for root, directories, filenames in os.walk(helpers.EXPORTDIR + "/export"):
        for subdir in directories:
            currentdir = os.path.join(root, subdir)
            create_listing_file(currentdir)
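The 'cp -rp' shell call above could be replaced with a pure-Python merge. A sketch under the assumption that the layout is EXPORTDIR/<org>*/<org>/Library and that the same helpers module is in scope; distutils.dir_util.copy_tree merges into an existing destination, which shutil.copytree cannot do on Python 2:

import glob
from distutils.dir_util import copy_tree

def merge_library_dirs(org_name):
    # Merge every <org>*/<org>/Library tree into the single export directory
    for libdir in glob.glob(helpers.EXPORTDIR + "/" + org_name + "*/" + org_name + "/Library"):
        copy_tree(libdir, helpers.EXPORTDIR + "/export", preserve_mode=1, preserve_times=1)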
Example #4
def export_puppet(last_export, export_type):
    """
    Export Puppet modules
    Takes the type (full/incr) and the date of the last run
    """
    PUPEXPORTDIR = helpers.EXPORTDIR + '/puppet'
    if not os.path.exists(PUPEXPORTDIR):
        print "Creating puppet export directory"
        os.makedirs(PUPEXPORTDIR)

    if export_type == 'full':
        msg = "Exporting all puppet modules"
    else:
        msg = "Exporting puppet modules from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    if export_type == 'full':
        os.system("find -L /var/lib/pulp/published/puppet/http/repos -type f -exec cp --parents -Lrp {} " \
            + PUPEXPORTDIR + " \;")

    else:
        os.system('find -L /var/lib/pulp/published/puppet/http/repos -type f -newerct $(date +%Y-%m-%d -d "' \
            + last_export + '") -exec cp --parents -Lrp {} ' + PUPEXPORTDIR + ' \;')

    return
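The incremental branch above shells out to 'find -newerct ... -exec cp --parents'. A pure-Python sketch of the same idea, assuming since_epoch is a Unix timestamp derived from last_export (the conversion is left out):

import os
import shutil

def copy_files_newer_than(srcroot, destroot, since_epoch):
    # Walk the published tree following symlinks (like 'find -L') and copy files
    # changed after since_epoch, preserving the full source path under destroot
    # (the equivalent of 'cp --parents').
    for dirpath, dirs, files in os.walk(srcroot, followlinks=True):
        for fname in files:
            src = os.path.join(dirpath, fname)
            if os.path.getctime(src) > since_epoch:
                dest = os.path.join(destroot, src.lstrip('/'))
                if not os.path.isdir(os.path.dirname(dest)):
                    os.makedirs(os.path.dirname(dest))
                shutil.copy2(src, dest)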
Example #5
def get_cv(org_id, publish_list):
    """Get the content views"""

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = {}
    ver_descr = {}
    ver_version = {}

    for cv_result in cvs['results']:
        # We will never publish the DOV
        if cv_result['name'] != "Default Organization View":

            # Handle specific includes and excludes
            if publish_list and cv_result['name'] not in publish_list:
                msg = "Skipping content view '" + cv_result['name'] + "'"
                helpers.log_msg(msg, 'DEBUG')
                continue

            # Get the ID of each Content View
            msg = "Processing content view '" + cv_result['name'] + "' " + str(cv_result['id'])
            helpers.log_msg(msg, 'DEBUG')

            # Find the next version of the view
            ver_list[cv_result['id']] = cv_result['id']
            ver_descr[cv_result['id']] = cv_result['name']
            ver_version[cv_result['id']] = cv_result['next_version']

    return ver_list, ver_descr, ver_version
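A hypothetical caller of the publish-side get_cv() above; the org id and content view names are made up for illustration:

(ver_list, ver_descr, ver_version) = get_cv(1, ['RHEL7-Base', 'Puppet-Modules'])
for cvid in ver_list:
    print "Will publish '" + ver_descr[cvid] + "' as version " + str(ver_version[cvid]) + ".0"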
Example #6
def prep_export_tree(org_name):
    """
    Combine individual export directories into a single export tree.
    The export top level contains /content and /custom directories, with
    'listing' files throughout the tree.
    """
    msg = "Preparing export directory tree..."
    helpers.log_msg(msg, 'INFO')
    print msg
    devnull = open(os.devnull, 'wb')
    if not os.path.exists(helpers.EXPORTDIR + "/export"):
        os.makedirs(helpers.EXPORTDIR + "/export")
    # Haven't found a nice python way to do this - yet...
    subprocess.call("cp -rp " + helpers.EXPORTDIR + "/" + org_name + "*/" + org_name + \
        "/Library/* " + helpers.EXPORTDIR + "/export", shell=True, stdout=devnull, stderr=devnull)
    # Remove original directories
    os.system("rm -rf " + helpers.EXPORTDIR + "/" + org_name + "*/")

    # We need to re-generate the 'listing' files as we will have overwritten some during the merge
    msg = "Rebuilding listing files..."
    helpers.log_msg(msg, 'INFO')
    print msg
    create_listing_file(helpers.EXPORTDIR + "/export")

    # pylint: disable=unused-variable
    for root, directories, filenames in os.walk(helpers.EXPORTDIR + "/export"):
        for subdir in directories:
            currentdir = os.path.join(root, subdir)
            create_listing_file(currentdir)
Example #7
def copy_to_pfserver(export_dir, pfserver, pfmodpath, pfuser):
    """Use rsync to copy the exported module tree to the puppet-forge-server instance."""
    target = pfuser + '@' + pfserver + ':' + pfmodpath
    msg = 'Copying puppet modules to ' + target + '\n'
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('rsync -avrzc ' + export_dir + '/* ' + target)
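The rsync call above goes through os.system and relies on the shell to expand '/*'. A sketch that avoids the shell by expanding the wildcard with glob and invoking rsync through subprocess:

import glob
import subprocess

def rsync_modules(export_dir, target):
    cmd = ['rsync', '-avrzc'] + sorted(glob.glob(export_dir + '/*')) + [target]
    return subprocess.call(cmd)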
Example #8
def export_puppet(last_export, export_type):
    """
    Export Puppet modules
    Takes the type (full/incr) and the date of the last run
    """
    PUPEXPORTDIR = helpers.EXPORTDIR + '/puppet'
    if not os.path.exists(PUPEXPORTDIR):
        print "Creating puppet export directory"
        os.makedirs(PUPEXPORTDIR)

    if export_type == 'full':
        msg = "Exporting all puppet modules"
    else:
        msg = "Exporting puppet modules from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    if export_type == 'full':
        os.system("find -L /var/lib/pulp/published/puppet/http/repos -type f -exec cp --parents -Lrp {} " \
            + PUPEXPORTDIR + " \;")

    else:
        os.system('find -L /var/lib/pulp/published/puppet/http/repos -type f -newerct $(date +%Y-%m-%d -d "' \
            + last_export + '") -exec cp --parents -Lrp {} ' + PUPEXPORTDIR + ' \;')

    return
Example #9
def get_cv(org_id, publish_list):
    """Get the content views"""

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = {}
    ver_descr = {}
    ver_version = {}

    for cv_result in cvs['results']:
        # We will never publish the DOV
        if cv_result['name'] != "Default Organization View":

            # Handle specific includes and excludes
            if publish_list and cv_result['name'] not in publish_list:
                msg = "Skipping content view '" + cv_result['name'] + "'"
                helpers.log_msg(msg, 'DEBUG')
                continue

            # Get the ID of each Content View
            msg = "Processing content view '" + cv_result['name'] + "' " + str(cv_result['id'])
            helpers.log_msg(msg, 'DEBUG')

            # Find the next version of the view
            ver_list[cv_result['id']] = cv_result['id']
            ver_descr[cv_result['id']] = cv_result['name']
            ver_version[cv_result['id']] = cv_result['next_version']

    return ver_list, ver_descr, ver_version
Example #10
def check_missing(imports, exports, dataset, fixhistory, vardir):
    """
    Compare export history with import history to find any datasets that have not been imported
    """
    missing = False

    if fixhistory:
        # Remove the last element (this import) before saving - we haven't imported yet!
        exports = exports[:-1]
        pickle.dump(exports, open(vardir + '/imports.pkl', "wb"))

        # Copy the current 'exporthistory' over the 'importhistory' to 'fix' current mismatches
        msg = "Saved export history as import history. Please re-run this import."
        helpers.log_msg(msg, 'INFO')
        print msg
        sys.exit(2)

    else:
        for ds in exports:
            if not ds in imports:
                if not dataset in ds:
                    msg = "Import dataset " + ds + " has not been imported"
                    helpers.log_msg(msg, 'WARNING')
                    missing = True

    return(missing)
Example #11
def check_version_views(version_id):
    """
    Check if our version ID belongs to any views, including CCV
    """
    version_in_use = False
    version_in_ccv = False

    # Extract a list of content views that the CV version belongs to
    viewlist = helpers.get_json(helpers.KATELLO_API +
                                "content_view_versions/" + str(version_id))

    # If the list is not empty we need to return this fact. A CV that belongs
    # to NO versions will be a candidate for cleanup.
    if viewlist['katello_content_views']:
        version_in_use = True
        msg = "Version " + str(
            viewlist['version']) + " is associated with published CV"
        helpers.log_msg(msg, 'DEBUG')

        # We can go further and see if this is associated with a CCV
        if viewlist['composite_content_view_ids']:
            version_in_ccv = True

    return version_in_use, version_in_ccv
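A hypothetical use of check_version_views() in a cleanup loop; the version id 42 is purely illustrative:

version_in_use, version_in_ccv = check_version_views(42)
if not version_in_use:
    print "Version 42 is not referenced by any content view - candidate for cleanup"
elif version_in_ccv:
    print "Version 42 is part of a composite content view - do not remove"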
Example #12
def get_cv(org_id, target_env, env_list, prior_list, promote_list):
    """Get the content views"""
    # Find the ID of the environment we are promoting to and from
    if not target_env in env_list:
        msg = "Target environment '" + target_env + "' not found"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)
    else:
        target_env_id = env_list[target_env]
        source_env_id = prior_list[target_env_id]

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = {}
    ver_descr = {}
    ver_version = {}

    for cv_result in cvs['results']:
        # We will never promote to/from the DOV
        if cv_result['name'] != "Default Organization View":

            # Handle specific includes and excludes
            if promote_list and cv_result['name'] not in promote_list:
                msg = "Skipping content view '" + cv_result['name'] + "'"
                helpers.log_msg(msg, 'DEBUG')
                continue

            # Get the ID of each Content View
            msg = "Processing content view '" + cv_result['name'] + "'"
            helpers.log_msg(msg, 'DEBUG')

            # Find the current version of the view in the env we are coming from
            for ver in cv_result['versions']:
                msg = "  Found in env_id " + str(ver['environment_ids']) + " view_id " +\
                    str(ver['id'])
                helpers.log_msg(msg, 'DEBUG')

                if source_env_id in ver['environment_ids']:
                    # Extract the name of the source environment so we can inform the user
                    for key, val in env_list.items():
                        if val == source_env_id:
                            prior_env = key
                    msg = "Found promotable version " + ver['version'] + " of '" +\
                        cv_result['name'] + "' in " + prior_env
                    helpers.log_msg(msg, 'INFO')
                    print msg

                    # Create a dictionary of CV IDs and the CV vers ID to promote
                    ver_list[cv_result['id']] = ver['id']
                    ver_descr[cv_result['id']] = cv_result['name']
                    ver_version[cv_result['id']] = ver['version']

    return ver_list, ver_descr, ver_version
Example #13
def get_cv(org_id, target_env, env_list, prior_list, promote_list):
    """Get the content views"""
    # Find the ID of the environment we are promoting to and from
    if not target_env in env_list:
        msg = "Target environment '" + target_env + "' not found"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FP, output)
        sys.exit(1)
    else:
        target_env_id = env_list[target_env]
        source_env_id = prior_list[target_env_id]

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = {}
    ver_descr = {}
    ver_version = {}

    for cv_result in cvs['results']:
        # We will never promote to/from the DOV
        if cv_result['name'] != "Default Organization View":

            # Handle specific includes and excludes
            if promote_list and cv_result['name'] not in promote_list:
                msg = "Skipping content view '" + cv_result['name'] + "'"
                helpers.log_msg(msg, 'DEBUG')
                continue

            # Get the ID of each Content View
            msg = "Processing content view '" + cv_result['name'] + "'"
            helpers.log_msg(msg, 'DEBUG')

            # Find the current version of the view in the env we are coming from
            for ver in cv_result['versions']:
                msg = "  Found in env_id " + str(ver['environment_ids']) + " view_id " +\
                    str(ver['id'])
                helpers.log_msg(msg, 'DEBUG')

                if source_env_id in ver['environment_ids']:
                    # Extract the name of the source environment so we can inform the user
                    for key, val in env_list.items():
                        if val == source_env_id:
                            prior_env = key
                    msg = "Found promotable version " + ver['version'] + " of '" +\
                        cv_result['name'] + "' in " + prior_env
                    helpers.log_msg(msg, 'INFO')
                    print msg

                    # Create a dictionary of CV IDs and the CV vers ID to promote
                    ver_list[cv_result['id']] = ver['id']
                    ver_descr[cv_result['id']] = cv_result['name']
                    ver_version[cv_result['id']] = ver['version']

    return ver_list, ver_descr, ver_version
Example #14
def clean_cv(dryrun):
    """Run content view cleanup via the clean_content_views script."""
    print "Running Content View Cleanup..."

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/clean_content_views', '-a', '-c'])
    else:
        msg = "Dry run - not actually performing cleanup"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/clean_content_views', '-a', '-c', '-d'])
Example #15
def extract_content(basename):
    """Extract the tar archive."""
    os.chdir(helpers.IMPORTDIR)

    # Extract the archives (Using OS call for this at the moment)
    msg = "Extracting tarfiles"
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('cat ' + basename + '_* | tar xpf -')
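The 'cat basename_* | tar xpf -' pipeline above can be reproduced without invoking a shell by streaming the split chunks into tar through subprocess. A sketch, assuming the chunks are named <basename>_00, <basename>_01 and so on:

import glob
import subprocess

def extract_chunks(basename):
    tar = subprocess.Popen(['tar', 'xpf', '-'], stdin=subprocess.PIPE)
    for chunk in sorted(glob.glob(basename + '_*')):
        with open(chunk, 'rb') as part:
            while True:
                block = part.read(4 * 1024 * 1024)
                if not block:
                    break
                tar.stdin.write(block)
    tar.stdin.close()
    return tar.wait()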
Example #16
def clean_cv(dryrun):
    """Run content view cleanup via the clean_content_views script."""
    print "Running Content View Cleanup..."

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/clean_content_views', '-a', '-c'])
    else:
        msg = "Dry run - not actually performing cleanup"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/clean_content_views', '-a', '-c', '-d'])
Example #17
def check_disk_space(export_type):
    """
    Check the disk usage of the pulp partition
    For a full export we need at least 50% free, as we spool to /var/lib/pulp.
    """
    pulp_used = str(helpers.disk_usage('/var/lib/pulp'))
    if export_type == 'full' and int(float(pulp_used)) > 50:
        msg = "Insufficient space in /var/lib/pulp for a full export. >50% free space is required."
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)
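helpers.disk_usage() is not shown in these examples. A minimal sketch of what such a helper might look like, assuming it returns the percentage of space used on the given mount point:

import os

def disk_usage(path):
    stat = os.statvfs(path)
    used = (stat.f_blocks - stat.f_bfree) * stat.f_frsize
    total = stat.f_blocks * stat.f_frsize
    return round(100.0 * used / total, 1)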
Example #18
def check_disk_space(export_type):
    """
    Check the disk usage of the pulp partition
    For a full export we need at least 50% free, as we spool to /var/lib/pulp.
    """
    pulp_used = str(helpers.disk_usage('/var/lib/pulp'))
    if export_type == 'full' and int(float(pulp_used)) > 50:
        msg = "Insufficient space in /var/lib/pulp for a full export. >50% free space is required."
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)
Example #19
def export_manifest():
    """
    Copies manifest downloaded by 'download_manifest.py' into the export bundle
    """
    if os.path.exists(helpers.EXPORTDIR + '/manifest'):
        msg = 'Found manifest to export'
        helpers.log_msg(msg, 'DEBUG')
        MFSTEXPORTDIR = helpers.EXPORTDIR + '/export/manifest'
        if not os.path.exists(MFSTEXPORTDIR):
            os.makedirs(MFSTEXPORTDIR)
        os.system('cp ' + helpers.EXPORTDIR + '/manifest/* ' + MFSTEXPORTDIR)
Example #20
def extract_content(basename):
    """
    Extract the tar archive
    """
    os.chdir(helpers.IMPORTDIR)

    # Extract the archives (Using OS call for this at the moment)
    msg = "Extracting tarfiles"
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('cat ' + basename + '_* | tar xpf -')
Example #21
def sync_content(org_id, imported_repos):
    """
    Synchronize the repositories
    Triggers a sync of all repositories belonging to the configured sync plan
    """
    repos_to_sync = []
    delete_override = False

    # Get a listing of repositories in this Satellite
    enabled_repos = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
            json.dumps(
                {
                    "organization_id": org_id,
                }
            ))

    # Loop through each repo to be imported/synced
    for repo in imported_repos:
        do_import = False
        for repo_result in enabled_repos['results']:
            if repo in repo_result['label']:
                do_import = True
                repos_to_sync.append(repo_result['id'])
        if do_import:
            msg = "Repo " + repo + " found in Satellite"
            helpers.log_msg(msg, 'DEBUG')
        else:
            msg = "Repo " + repo + " is not enabled in Satellite"
            # If the repo is not enabled, don't delete the input files.
            # This gives the admin a chance to manually enable the repo and re-import
            delete_override = True
            helpers.log_msg(msg, 'WARNING')
            # TODO: We could go on here and try to enable the Red Hat repo .....

    msg = "Repo ids to sync: " + str(repos_to_sync)
    helpers.log_msg(msg, 'DEBUG')

    msg = "Syncing repositories"
    helpers.log_msg(msg, 'INFO')
    print msg
    task_id = helpers.post_json(
        helpers.KATELLO_API + "repositories/bulk/sync", \
            json.dumps(
                {
                    "ids": repos_to_sync,
                }
            ))["id"]
    msg = "Repo sync task id = " + task_id
    helpers.log_msg(msg, 'DEBUG')

    return task_id, delete_override
Example #22
def get_cv(org_id):
    """
    Get the version of the Content Views
    There should only ever be ONE version of the Default Org View.
    It should be v1.0 with id=1, but we're verifying here just in case.
    """

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    for cv_result in cvs['results']:
        if cv_result['name'] == "Default Organization View":
            msg = "CV Name: " + cv_result['name']
            helpers.log_msg(msg, 'DEBUG')

            # Find the current version of the view in the env we are coming from
            for ver in cv_result['versions']:
                msg = "  Env ID:     " + str(ver['environment_ids'])
                helpers.log_msg(msg, 'DEBUG')
                msg = "  Version:    " + str(ver['version'])
                helpers.log_msg(msg, 'DEBUG')
                msg = "  Version ID: " + str(ver['id'])
                helpers.log_msg(msg, 'DEBUG')

        # There will only ever be one DOV
        return cv_result['id']
Example #23
def check_running_tasks():
    """
    Check for any currently running Sync or Export tasks
    Exits script if any Synchronize or Export tasks are found in a running state.
    """
    tasks = helpers.get_json(
        helpers.FOREMAN_API + "tasks/")

    # From the list of tasks, look for any running export or sync jobs.
    # If we have any we exit, as we can't export in this state.
    for task_result in tasks['results']:
        if task_result['state'] == 'running':
            if task_result['humanized']['action'] == 'Export':
                msg = "Unable to export - an Export task is already running"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
            if task_result['humanized']['action'] == 'Synchronize':
                msg = "Unable to export - a Sync task is currently running"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
        if task_result['state'] == 'paused':
            if task_result['humanized']['action'] == 'Export':
                msg = "Unable to export - an Export task is paused. Please resolve this issue first"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
            if task_result['humanized']['action'] == 'Synchronize':
                msg = "Unable to export - a Sync task is paused. Resume any paused sync tasks."
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)

    check_incomplete_sync()
Example #24
def check_incomplete_sync():
    """
    Check for any sync tasks that are in an Incomplete state.
    These are not paused or locked, but are the orange 100% complete ones in the UI
    """
    repo_list = helpers.get_json(
        helpers.KATELLO_API + "/content_view_versions")

    # Extract the list of repo ids, then check the state of each one.
    incomplete_sync = False
    for repo in repo_list['results']:
        for repo_id in repo['repositories']:
            repo_status = helpers.get_json(
                helpers.KATELLO_API + "/repositories/" + str(repo_id['id']))

            if repo_status['content_type'] == 'puppet':
                if repo_status['last_sync']['state'] == 'stopped':
                    if repo_status['last_sync']['result'] == 'warning':
                        incomplete_sync = True
                        msg = "Repo ID " + str(repo_id['id']) + " Sync Incomplete"
                        helpers.log_msg(msg, 'DEBUG')

    # If we have detected incomplete sync tasks, ask the user if they want to export anyway.
    # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected sat.
    if incomplete_sync:
        msg = "Incomplete sync jobs detected"
        helpers.log_msg(msg, 'WARNING')
        answer = helpers.query_yes_no("Continue with export?", "no")
        if not answer:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)
        else:
            msg = "Export continued by user"
            helpers.log_msg(msg, 'INFO')
Example #25
def run_imports(dryrun, dayofweek, days):
    """Run imports of satellite content."""
    # If we are on an internet connected satellite, there will never be anything to import
    # In this case, we'll run the publish on Tuesday
    if helpers.DISCONNECTED == False:
        print "Internet connected, nothing to import"
        if dayofweek == days['Tue']:
            good_imports = True
        else:
            good_imports = False
        return good_imports

    print "Processing Imports..."

    # Find any sha256 files in the import dir
    infiles = glob.glob(helpers.IMPORTDIR + '/*.sha256')

    # Extract the dataset timestamp/name from the filename and add to a new list
    # Assumes naming standard   sat6_export_YYYYMMDD-HHMM_NAME.sha256
    # 'sorted' function should result in imports being done in correct order by filename
    tslist = []
    good_imports = False
    for f in sorted(infiles):
        dstime = f.split('_')[-2]
        dsname = (f.split('_')[-1]).split('.')[-2]
        tslist.append(dstime + '_' + dsname)

    if tslist:
        msg = 'Found import datasets on disk...\n' + '\n'.join(tslist)
    else:
        msg = 'No import datasets to process'
    helpers.log_msg(msg, 'INFO')
    print msg

    # Now for each import file in the list, run the import script in unattended mode:-)
    if tslist:
        if not dryrun:
            for dataset in tslist:
                rc = subprocess.call(
                    ['/usr/local/bin/sat_import', '-u', '-r', '-d', dataset])

                # If the import is successful
                if rc == 0:
                    good_imports = True

        else:
            msg = "Dry run - not actually performing import"
            helpers.log_msg(msg, 'WARNING')

    return good_imports
Example #26
def get_cv(org_id):
    """
    Get the version of the Content Views
    There should only ever be ONE version of the Default Org View.
    It should be v1.0 with id=1, but we're verifying here just in case.
    """

    # Query API to get all content views for our org
    cvs = helpers.get_json(helpers.KATELLO_API + "organizations/" +
                           str(org_id) + "/content_views/")
    for cv_result in cvs['results']:
        if cv_result['name'] == "Default Organization View":
            msg = "CV Name: " + cv_result['name']
            helpers.log_msg(msg, 'DEBUG')

            # Find the current version of the view in the env we are coming from
            for ver in cv_result['versions']:
                msg = "  Env ID:     " + str(ver['environment_ids'])
                helpers.log_msg(msg, 'DEBUG')
                msg = "  Version:    " + str(ver['version'])
                helpers.log_msg(msg, 'DEBUG')
                msg = "  Version ID: " + str(ver['id'])
                helpers.log_msg(msg, 'DEBUG')

        # There will only ever be one DOV
        return cv_result['id']
Example #27
def get_inputfiles(expdate):
    """
    Verify the input files exist and are valid.
    'expdate' is a date (YYYY-MM-DD) provided by the user - date is in the filename of the archive
    Returned 'basename' is the full export filename (sat6_export_YYYY-MM-DD)
    """
    basename = 'sat6_export_' + expdate
    shafile = basename + '.sha256'
    if not os.path.exists(helpers.IMPORTDIR + '/' + basename + '.sha256'):
        msg = "Cannot continue - missing sha256sum file " + helpers.IMPORTDIR + '/' + shafile
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Verify the checksum of each part of the import
    os.chdir(helpers.IMPORTDIR)
    msg = 'Verifying Checksums in ' + helpers.IMPORTDIR + '/' + shafile
    helpers.log_msg(msg, 'INFO')
    print msg
    result = os.system('sha256sum -c ' + shafile)

    # Return code from sha256sum is 0 if all is fine.
    if result != 0:
        msg = "Import Aborted - Tarfile checksum verification failed"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # We're good
    msg = "Tarfile checksum verification passed"
    helpers.log_msg(msg, 'INFO')
    print helpers.GREEN + "Checksum verification - Pass" + helpers.ENDC

    return basename
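The 'sha256sum -c' call above could also be done natively with hashlib. A sketch assuming each line of the .sha256 file uses the usual '<hexdigest>  <filename>' layout:

import hashlib

def verify_sha256(shafile):
    ok = True
    with open(shafile) as listing:
        for line in listing:
            expected, fname = line.split()
            digest = hashlib.sha256()
            with open(fname, 'rb') as part:
                for block in iter(lambda: part.read(1024 * 1024), b''):
                    digest.update(block)
            if digest.hexdigest() != expected:
                ok = False
    return ok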
Example #28
def get_inputfiles(dataset):
    """
    Verify the input files exist and are valid.
    'dataset' is a date (YYYY-MM-DD) provided by the user - date is in the filename of the archive
    Returned 'basename' is the full export filename (sat6_export_YYYY-MM-DD)
    """
    basename = 'sat6_export_' + dataset
    shafile = basename + '.sha256'
    if not os.path.exists(helpers.IMPORTDIR + '/' + basename + '.sha256'):
        msg = "Cannot continue - missing sha256sum file " + helpers.IMPORTDIR + '/' + shafile
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    # Verify the checksum of each part of the import
    os.chdir(helpers.IMPORTDIR)
    msg = 'Verifying Checksums in ' + helpers.IMPORTDIR + '/' + shafile
    helpers.log_msg(msg, 'INFO')
    print msg
    result = os.system('sha256sum -c ' + shafile)

    # Return code from sha256sum is 0 if all is fine.
    if result != 0:
        msg = "Import Aborted - Tarfile checksum verification failed"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    # We're good
    msg = "Tarfile checksum verification passed"
    helpers.log_msg(msg, 'INFO')
    print helpers.GREEN + "Checksum verification - Pass" + helpers.ENDC

    return basename
Example #29
def check_running_tasks():
    """
    Check for any currently running Sync or Export tasks
    Exits script if any Synchronize or Export tasks are found in a running state.
    """
    tasks = helpers.get_json(helpers.FOREMAN_API + "tasks/")

    # From the list of tasks, look for any running export or sync jobs.
    # If we have any we exit, as we can't export in this state.
    for task_result in tasks['results']:
        if task_result['state'] == 'running':
            if task_result['humanized']['action'] == 'Export':
                msg = "Unable to export - an Export task is already running"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
            if task_result['humanized']['action'] == 'Synchronize':
                msg = "Unable to export - a Sync task is currently running"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
        if task_result['state'] == 'paused':
            if task_result['humanized']['action'] == 'Export':
                msg = "Unable to export - an Export task is paused. Please resolve this issue first"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)
            if task_result['humanized']['action'] == 'Synchronize':
                msg = "Unable to export - a Sync task is paused. Resume any paused sync tasks."
                helpers.log_msg(msg, 'ERROR')
                sys.exit(-1)

    check_incomplete_sync()
Example #30
def promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run):
    """Promote Content View"""
    target_env_id = env_list[target_env]
    source_env_id = prior_list[target_env_id]

    # Extract the name of the source environment so we can inform the user
    for key, val in env_list.items():
        if val == source_env_id:
            prior_env = key

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Promotion from " + prior_env + " to " + target_env

    # Now we have all the info needed, we can actually trigger the promotion.
    # Loop through each CV with promotable versions
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching promotion criteria
    if not ver_list:
        msg = "No content view versions found matching promotion criteria"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(1)

    for cvid in ver_list.keys():

        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(cvid, ver_descr[cvid])

        if not locked:
            msg = "Promoting '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) +\
                " from " + prior_env + " to " + str(target_env)
            helpers.log_msg(msg, 'INFO')
            print helpers.HEADER + msg + helpers.ENDC

        if not dry_run and not locked:
            try:
                task_id = helpers.post_json(
                    helpers.KATELLO_API + "content_view_versions/" + str(ver_list[cvid]) +\
                    "/promote/", json.dumps(
                        {
                            "environment_id": target_env_id
                        }
                        ))["id"]
            except Warning:
                msg = "Failed to initiate promotion of " + str(ver_descr[cvid])
                helpers.log_msg(msg, 'WARNING')
            else:
                task_list.append(task_id)
                ref_list[task_id] = ver_descr[cvid]

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing promotion"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)


    return task_list, ref_list, task_name
Example #31
def run_imports(dryrun, dayofweek, days):
    """Run imports of satellite content."""
    # If we are on an internet connected satellite, there will never be anything to import
    # In this case, we'll run the publish on Tuesday
    if helpers.DISCONNECTED == False:
        print "Internet connected, nothing to import"
        if dayofweek == days['Tue']:
            good_imports = True
        else:
            good_imports = False
        return good_imports

    print "Processing Imports..."

    # Find any sha256 files in the import dir
    infiles = glob.glob(helpers.IMPORTDIR + '/*.sha256')

    # Extract the dataset timestamp/name from the filename and add to a new list
    # Assumes naming standard   sat6_export_YYYYMMDD-HHMM_NAME.sha256
    # 'sorted' function should result in imports being done in correct order by filename
    tslist = []
    good_imports = False
    for f in sorted(infiles):
        dstime = f.split('_')[-2]
        dsname = (f.split('_')[-1]).split('.')[-2]
        tslist.append(dstime + '_' + dsname)

    if tslist:
        msg = 'Found import datasets on disk...\n' + '\n'.join(tslist)
    else:
        msg = 'No import datasets to process'
    helpers.log_msg(msg, 'INFO')
    print msg

    # Now for each import file in the list, run the import script in unattended mode:-)
    if tslist:
        if not dryrun:
            for dataset in tslist:
                rc = subprocess.call(['/usr/local/bin/sat_import', '-u', '-r', '-d', dataset])

                # If the import is successful
                if rc == 0:
                    good_imports = True

        else:
            msg = "Dry run - not actually performing import"
            helpers.log_msg(msg, 'WARNING')

    return good_imports
Example #32
def export_iso(last_export, export_type):
    """
    Export ISO content modules
    Takes the type (full/incr) and the date of the last run
    """

    ISOEXPORTDIR = helpers.EXPORTDIR + '/iso'
    if not os.path.exists(ISOEXPORTDIR):
        print "Creating ISO export directory"
        os.makedirs(ISOEXPORTDIR)

    if export_type == 'full':
        msg = "Exporting all ISO content"
    else:
        msg = "Exporting ISO content from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    if export_type == 'full':
        os.system("find -L /var/lib/pulp/published/http/isos -type f -exec cp --parents -Lrp {} " \
            + ISOEXPORTDIR + " \;")

    else:
        os.system('find -L /var/lib/pulp/published/http/isos -type f -newerct $(date +%Y-%m-%d -d "' \
            + last_export + '") -exec cp --parents -Lrp {} ' + ISOEXPORTDIR + ' \;')


    # At this point the iso/ export dir will contain individual repos - we need to 'normalise' them
    # This is a 'dirty' workaround, but puts the content where it is expected to be for importing.
    #
    # /.../Red_Hat_Enterprise_Linux_Server-Red_Hat_Enterprise_Linux_7_Server_ISOs_x86_64_7_2
    # => /.../content/dist/rhel/server/7/7.2/x86_64/iso

    for dirpath, subdirs, files in os.walk(ISOEXPORTDIR):
        for tdir in subdirs:
            if 'Red_Hat_Enterprise_Linux_7_Server_ISOs_x86_64_7_2' in tdir:
                INDIR = os.path.join(dirpath, tdir)
                OUTDIR = helpers.EXPORTDIR + '/content/dist/rhel/server/7/7.2/x86_64/iso'
            elif 'Red_Hat_Enterprise_Linux_6_Server_ISOs_x86_64_6_8' in tdir:
                INDIR = os.path.join(dirpath, tdir)
                OUTDIR = helpers.EXPORTDIR + '/content/dist/rhel/server/6/6.8/x86_64/iso'
            else:
                # Skip directories that do not match a known ISO repo, otherwise
                # INDIR/OUTDIR could be referenced before they have been set.
                continue

            print INDIR + ' => ' + OUTDIR
            if not os.path.exists(OUTDIR):
                shutil.move(INDIR, OUTDIR)

    return
Example #33
def promote_cv(dryrun, lifecycle):
    """Promote content views to the given lifecycle environment."""
    print "Running Content View Promotion to " + lifecycle + "..."

    # Set the initial state
    good_promote = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-m', '-e', lifecycle])
    else:
        msg = "Dry run - not actually performing promotion"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-d', '-m', '-e', lifecycle])

    if rc == 0:
        good_promote = True

    return good_promote
Example #34
def publish_cv(dryrun):
    """Publish content views via the publish_content_views script."""
    print "Running Content View Publish..."

    # Set the initial state
    good_publish = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a', '-m'])
    else:
        msg = "Dry run - not actually performing publish"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a', '-m', '-d'])

    if rc == 0:
        good_publish = True

    return good_publish
Example #35
def promote_cv(dryrun, lifecycle):
    """Promote content views to the given lifecycle environment."""
    print "Running Content View Promotion to " + lifecycle + "..."

    # Set the initial state
    good_promote = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-e', lifecycle])
    else:
        msg = "Dry run - not actually performing promotion"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/promote_content_views', '-q', '-d', '-e', lifecycle])

    if rc == 0:
        good_promote = True

    return good_promote
Example #36
def publish_cv(dryrun):
    """Publish content views via the publish_content_views script."""
    print "Running Content View Publish..."

    # Set the initial state
    good_publish = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a'])
    else:
        msg = "Dry run - not actually performing publish"
        helpers.log_msg(msg, 'WARNING')
        rc = subprocess.call(['/usr/local/bin/publish_content_views', '-q', '-a', '-d'])

    if rc == 0:
        good_publish = True

    return good_publish
Example #37
def check_incomplete_sync():
    """
    Check for any sync tasks that are in an Incomplete state.
    These are not paused or locked, but are the orange 100% complete ones in the UI
    """
    repo_list = helpers.get_json(
        helpers.KATELLO_API + "/content_view_versions")

    # Extract the list of repo ids, then check the state of each one.
    incomplete_sync = False
    for repo in repo_list['results']:
        for repo_id in repo['repositories']:
            repo_status = helpers.get_json(
                helpers.KATELLO_API + "/repositories/" + str(repo_id['id']))

            if repo_status['content_type'] == 'yum':
                if repo_status['last_sync'] is None:
                    if repo_status['url'] is None:
                        msg = "Repo ID " + str(repo_id['id']) + " No Sync Configured"
                        #helpers.log_msg(msg, 'DEBUG')
                elif repo_status['last_sync']['state'] == 'stopped':
                    if repo_status['last_sync']['result'] == 'warning':
                        incomplete_sync = True
                        msg = "Repo ID " + str(repo_id['id']) + " Sync Incomplete"
                        helpers.log_msg(msg, 'DEBUG')

    # If we have detected incomplete sync tasks, ask the user if they want to export anyway.
    # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected sat.
    if incomplete_sync:
        msg = "Incomplete sync jobs detected"
        helpers.log_msg(msg, 'WARNING')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with export?", "no")
            if not answer:
                msg = "Export Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Export continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(3)
Example #38
def do_gpg_check(export_dir):
    """
    Find and GPG Check all RPM files
    """
    msg = "Checking GPG integrity of exported RPMs..."
    helpers.log_msg(msg, 'INFO')
    output = "{:<70}".format(msg)
    print output[:70],
    # Force the status message to be shown to the user
    sys.stdout.flush()

    badrpms = []
    os.chdir(export_dir)
    for rpm in locate("*.rpm"):
        return_code = subprocess.call("rpm -K " + rpm, shell=True, stdout=open(os.devnull, 'wb'))

        # A non-zero return code indicates a GPG check failure.
        if return_code != 0:
            # For display purposes, strip the first 6 directory elements
            rpmnew = os.path.join(*(rpm.split(os.path.sep)[6:]))
            badrpms.append(rpmnew)

    # If we have any bad ones we need to fail the export.
    if len(badrpms) != 0:
        print helpers.RED + "GPG Check FAILED" + helpers.ENDC
        msg = "The following RPM's failed the GPG check.."
        helpers.log_msg(msg, 'ERROR')
        for badone in badrpms:
            msg = badone
            helpers.log_msg(msg, 'ERROR')
        msg = "------ Export Aborted ------"
        helpers.log_msg(msg, 'INFO')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure - GPG checksum failure"
            message = "GPG check of exported RPMs failed. Check logs for details\n\n" + output
            helpers.mailout(subject, message)
        sys.exit(1)
    else:
        msg = "GPG check completed successfully"
        helpers.log_msg(msg, 'INFO')
        print helpers.GREEN + "GPG Check - Pass" + helpers.ENDC
Example #39
def publish(ver_list, ver_descr, ver_version, dry_run, runuser):
    """Publish Content View"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Publish content view to Library"

    # Now we have all the info needed, we can actually trigger the publish.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching publish criteria
    if not ver_list:
        msg = "No content view versions found matching publication criteria"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    for cvid in ver_list.keys():

        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])

        if not locked:
            msg = "Publishing '" + str(ver_descr[cvid]) + "' Version " + str(ver_version[cvid]) + ".0"
            helpers.log_msg(msg, 'INFO')
            print helpers.HEADER + msg + helpers.ENDC

        # Set up the description that will be added to the published version
        description = "Published by " + runuser + "\n via API script"

        if not dry_run and not locked:
            try:
                task_id = helpers.post_json(
                    helpers.KATELLO_API + "content_views/" + str(ver_list[cvid]) +\
                    "/publish", json.dumps(
                        {
                            "description": description
                        }
                        ))["id"]
            except Warning:
                msg = "Failed to initiate publication of " + str(ver_descr[cvid])
                helpers.log_msg(msg, 'WARNING')
            else:
                task_list.append(task_id)
                ref_list[task_id] = ver_descr[cvid]

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing publish"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)


    return task_list, ref_list, task_name
Example #40
def export_cv(dov_ver, last_export, export_type):
    """
    Export Content View
    Takes the content view version and a start time (API 'since' value)
    """
    if export_type == 'full':
        msg = "Exporting complete DOV version " + str(dov_ver)
    else:
        msg = "Exporting DOV version " + str(dov_ver) + " from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    try:
        if export_type == 'full':
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export", \
                    json.dumps(
                        {
                        }
                    ))["id"]
        else:
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export/", \
                    json.dumps(
                        {
                            "since": last_export,
                        }
                    ))["id"]
    except: # pylint: disable-msg=W0702
        msg = "Unable to start export - Conflicting Sync or Export already in progress"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure"
            helpers.mailout(subject, output)
        sys.exit(1)

    # Trap some other error conditions
    if "Required lock is already taken" in str(task_id):
        msg = "Unable to start export - Sync in progress"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            subject = "Satellite 6 export failure"
            helpers.mailout(subject, output)
        sys.exit(1)

    msg = "Export started, task_id = " + str(task_id)
    helpers.log_msg(msg, 'DEBUG')

    return str(task_id)
Example #41
def get_envs(org_id):
    """Get list of environments for the given org"""
    envs = helpers.get_json(
        helpers.SAT_API + "organizations/" + str(org_id) + "/environments/")

    # ... and add them to a dictionary, with respective 'Prior' environment
    env_list = {}
    prior_list = {}
    for env in envs['results']:
        env_list[env['name']] = env['id']
        if env['name'] == "Library":
            prior = 0
        else:
            prior = env['prior']['id']
        prior_list[env['id']] = prior

        msg = "Found environment '" + env['name'] + "', env_id " + str(env['id']) +\
            " (prior_id " + str(prior) + ")"
        helpers.log_msg(msg, 'DEBUG')

    return env_list, prior_list
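A hypothetical use of get_envs() to resolve the source and target of a promotion, mirroring what get_cv() and promote() do elsewhere in these examples; the org id and the 'Quality' environment name are illustrative:

env_list, prior_list = get_envs(1)
target_env_id = env_list['Quality']
source_env_id = prior_list[target_env_id]
print "Promoting from env " + str(source_env_id) + " to env " + str(target_env_id)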
Example #42
def get_envs(org_id):
    """Get list of environments for the given org"""
    envs = helpers.get_json(
        helpers.SAT_API + "organizations/" + str(org_id) + "/environments/")

    # ... and add them to a dictionary, with respective 'Prior' environment
    env_list = {}
    prior_list = {}
    for env in envs['results']:
        env_list[env['name']] = env['id']
        if env['name'] == "Library":
            prior = 0
        else:
            prior = env['prior']['id']
        prior_list[env['id']] = prior

        msg = "Found environment '" + env['name'] + "', env_id " + str(env['id']) +\
            " (prior_id " + str(prior) + ")"
        helpers.log_msg(msg, 'DEBUG')

    return env_list, prior_list
Example #43
def push_puppet(dryrun):
    """
    Performs a push of puppet modules using the DEFAULT puppet-forge-server defined in the
    config.yml
    """
    print "Pushing puppet modules to puppet-forge server..."

    # Set the initial state
    good_puppetpush = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/push_puppetforge', '-r', 'puppet-forge'])

        # If the import is successful
        if rc == 0:
            good_puppetpush = True

    else:
        msg = "Dry run - not actually performing module push"
        helpers.log_msg(msg, 'WARNING')

    return good_puppetpush
Example #44
def push_puppet(dryrun):
    """
    Performs a push of puppet modules using the DEFAULT puppet-forge-server defined in the
    config.yml
    """
    print "Pushing puppet modules to puppet-forge server..."

    # Set the initial state
    good_puppetpush = False

    if not dryrun:
        rc = subprocess.call(['/usr/local/bin/push_puppetforge', '-r', 'puppet-forge'])

        # If the import is successful
        if rc == 0:
            good_puppetpush = True

    else:
        msg = "Dry run - not actually performing module push"
        helpers.log_msg(msg, 'WARNING')

    return good_puppetpush
Example #45
def run_imports(dryrun):
    """Run imports of satellite content."""
    print "Processing Imports..."

    # Find any sha256 files in the import dir
    infiles = glob.glob(helpers.IMPORTDIR + '/*.sha256')

    # Extract the dataset timestamp/name from the filename and add to a new list
    # Assumes naming standard   sat6_export_YYYYMMDD-HHMM_NAME.sha256
    # 'sorted' function should result in imports being done in correct order by filename
    tslist = []
    good_imports = False
    for f in sorted(infiles):
        dstime = f.split('_')[-2]
        dsname = (f.split('_')[-1]).split('.')[-2]
        tslist.append(dstime + '_' + dsname)

    if tslist:
        msg = 'Found import datasets on disk...\n' + '\n'.join(tslist)
    else:
        msg = 'No import datasets to process'
    helpers.log_msg(msg, 'INFO')
    print msg

    # Now for each import file in the list, run the import script in unattended mode:-)
    if tslist:
        if not dryrun:
            for dataset in tslist:
                rc = subprocess.call(
                    ['/usr/local/bin/sat_import', '-u', '-r', '-d', dataset])

                # If the import is successful
                if rc == 0:
                    good_imports = True

        else:
            msg = "Dry run - not actually performing import"
            helpers.log_msg(msg, 'WARNING')

    return good_imports
Example #46
def check_version_views(version_id):
    """
    Check if our version ID belongs to any views, including CCV
    """
    version_in_use = False
    version_in_ccv = False

    # Extract a list of content views that the CV version belongs to
    viewlist = helpers.get_json(
        helpers.KATELLO_API + "content_view_versions/" + str(version_id))

    # If the list is not empty we need to return this fact. A CV that belongs
    # to NO versions will be a candidate for cleanup.
    if viewlist['katello_content_views']:
        version_in_use = True
        msg = "Version " + str(viewlist['version']) + " is associated with published CV"
        helpers.log_msg(msg, 'DEBUG')

        # We can go further and see if this is associated with a CCV
        if viewlist['composite_content_view_ids']:
            version_in_ccv = True

    return version_in_use, version_in_ccv
Example #47
def create_tar(export_dir, name, export_history):
    """
    Create a TAR of the content we have exported
    Creates a single tar, then splits into DVD size chunks and calculates
    sha256sum for each chunk.
    """
    today = datetime.datetime.strftime(datetime.datetime.now(), '%Y%m%d-%H%M')
    msg = "Creating TAR files..."
    helpers.log_msg(msg, 'INFO')
    print msg

    # Add this export to the export_history list
    fname = today + '_' + name
    export_history.append(fname)
    pickle.dump(export_history, open(vardir + '/exporthistory_' + name + '.pkl', 'wb'))
    pickle.dump(export_history, open(export_dir + '/exporthistory_' + name + '.pkl', 'wb'))

    os.chdir(export_dir)
    full_tarfile = helpers.EXPORTDIR + '/sat6_export_' + today + '_' + name
    short_tarfile = 'sat6_export_' + today + '_' + name
    with tarfile.open(full_tarfile, 'w') as archive:
        archive.add(os.curdir, recursive=True)

    # Get a list of all the RPM content we are exporting
    result = [y for x in os.walk(export_dir) for y in glob(os.path.join(x[0], '*.rpm'))]
    if result:
        f_handle = open(helpers.LOGDIR + '/export_' + today + '_' + name + '.log', 'a+')
        f_handle.write('-------------------\n')
        for rpm in result:
            m_rpm = os.path.join(*(rpm.split(os.path.sep)[6:]))
            f_handle.write(m_rpm + '\n')
        f_handle.close()

    # When we've tar'd up the content we can delete the export dir.
    os.chdir(helpers.EXPORTDIR)
    shutil.rmtree(export_dir)
    if os.path.exists(helpers.EXPORTDIR + "/iso"):
        shutil.rmtree(helpers.EXPORTDIR + "/iso")
    if os.path.exists(helpers.EXPORTDIR + "/puppet"):
        shutil.rmtree(helpers.EXPORTDIR + "/puppet")

    # Split the resulting tar into DVD size chunks & remove the original.
    msg = "Splitting TAR file..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system("split -d -b 4200M " + full_tarfile + " " + full_tarfile + "_")
    os.remove(full_tarfile)

    # Temporary until pythonic method is done
    msg = "Calculating Checksums..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('sha256sum ' + short_tarfile + '_* > ' + short_tarfile + '.sha256')
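The checksum step above is flagged as temporary. A pythonic sketch using hashlib that writes the same '<hexdigest>  <filename>' lines that sha256sum produces:

import glob
import hashlib

def write_sha256(short_tarfile):
    with open(short_tarfile + '.sha256', 'w') as listing:
        for part_name in sorted(glob.glob(short_tarfile + '_*')):
            digest = hashlib.sha256()
            with open(part_name, 'rb') as part:
                for block in iter(lambda: part.read(1024 * 1024), b''):
                    digest.update(block)
            listing.write(digest.hexdigest() + '  ' + part_name + '\n')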
Example #48
def export_repo(repo_id, last_export, export_type):
    """
    Export individual repository
    Takes the repository id and a start time (API 'since' value)
    """
    if export_type == 'full':
        msg = "Exporting repository id " + str(repo_id)
    else:
        msg = "Exporting repository id " + str(
            repo_id) + " from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    try:
        if export_type == 'full':
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/" + str(repo_id) + "/export", \
                    json.dumps(
                        {
                        }
                    ))["id"]
        else:
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/" + str(repo_id) + "/export/", \
                    json.dumps(
                        {
                            "since": last_export,
                        }
                    ))["id"]
    except:  # pylint: disable-msg=W0702
        msg = "Unable to start export - Conflicting Sync or Export already in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Trap some other error conditions
    if "Required lock is already taken" in str(task_id):
        msg = "Unable to start export - Sync in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    msg = "Export started, task_id = " + str(task_id)
    helpers.log_msg(msg, 'DEBUG')

    return str(task_id)
Example #49
def export_cv(dov_ver, last_export, export_type):
    """
    Export Content View
    Takes the content view version and a start time (API 'since' value)
    """
    if export_type == 'full':
        msg = "Exporting complete DOV version " + str(dov_ver)
    else:
        msg = "Exporting DOV version " + str(dov_ver) + " from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    try:
        if export_type == 'full':
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export", \
                    json.dumps(
                        {
                        }
                    ))["id"]
        else:
            task_id = helpers.post_json(
                helpers.KATELLO_API + "content_view_versions/" + str(dov_ver) + "/export/", \
                    json.dumps(
                        {
                            "since": last_export,
                        }
                    ))["id"]
    except: # pylint: disable-msg=W0702
        msg = "Unable to start export - Conflicting Sync or Export already in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Trap some other error conditions
    if "Required lock is already taken" in str(task_id):
        msg = "Unable to start export - Sync in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    msg = "Export started, task_id = " + str(task_id)
    helpers.log_msg(msg, 'DEBUG')

    return str(task_id)
Example #50
def export_repo(repo_id, last_export, export_type):
    """
    Export individual repository
    Takes the repository id and a start time (API 'since' value)
    """
    if export_type == 'full':
        msg = "Exporting repository id " + str(repo_id)
    else:
        msg = "Exporting repository id " + str(repo_id) + " from start date " + last_export
    helpers.log_msg(msg, 'INFO')

    try:
        if export_type == 'full':
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/" + str(repo_id) + "/export", \
                    json.dumps(
                        {
                        }
                    ))["id"]
        else:
            task_id = helpers.post_json(
                helpers.KATELLO_API + "repositories/" + str(repo_id) + "/export/", \
                    json.dumps(
                        {
                            "since": last_export,
                        }
                    ))["id"]
    except: # pylint: disable-msg=W0702
        msg = "Unable to start export - Conflicting Sync or Export already in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Trap some other error conditions
    if "Required lock is already taken" in str(task_id):
        msg = "Unable to start export - Sync in progress"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    msg = "Export started, task_id = " + str(task_id)
    helpers.log_msg(msg, 'DEBUG')

    return str(task_id)
def get_inputfiles(dataset):
    """
    Verify the input files exist and are valid.
    'dataset' is a date (YYYYMMDD-HHMM_ENV) provided by the user - date is in the filename of the archive
    Returned 'basename' is the full export filename (sat6_export_YYYYMMDD-HHMM_ENV)
    """
    basename = 'sat6_export_' + dataset
    shafile = basename + '.sha256'
    if not os.path.exists(helpers.IMPORTDIR + '/' + shafile):
        msg = "Cannot continue - missing sha256sum file " + helpers.IMPORTDIR + '/' + shafile
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Verify the checksum of each part of the import
    os.chdir(helpers.IMPORTDIR)
    msg = 'Verifying Checksums in ' + helpers.IMPORTDIR + '/' + shafile
    helpers.log_msg(msg, 'INFO')
    print msg
    result = os.system('sha256sum -c ' + shafile)

    # Return code from sha256sum is 0 if all is fine.
    if result != 0:
        msg = "Import Aborted - Tarfile checksum verification failed"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # We're good
    msg = "Tarfile checksum verification passed"
    helpers.log_msg(msg, 'INFO')
    print helpers.GREEN + "Checksum verification - Pass" + helpers.ENDC

    return basename
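# Companion sketch for the import side: once the checksums verify, the split
# chunks (basename + '_00', '_01', ...) can be concatenated back into a single
# stream and extracted. This reconstruction is assumed from the way create_tar
# splits the archive; it is not part of the original script.
import subprocess

def extract_archive(basename):
    """Concatenate the split tar chunks in IMPORTDIR and extract them."""
    os.chdir(helpers.IMPORTDIR)
    # split -d names the chunks numerically, so shell globbing keeps them in order
    result = subprocess.call('cat ' + basename + '_* | tar xpf -', shell=True)
    if result != 0:
        helpers.log_msg("Failed to extract " + basename, 'ERROR')
        sys.exit(1)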
Example #52
def get_cv(org_id, cleanup_list, keep):
    """Get the content views"""

    # Query API to get all content views for our org
    cvs = helpers.get_p_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/",
        json.dumps({"per_page":"10000"})
    )
    ver_list = collections.OrderedDict()
    ver_descr = collections.OrderedDict()
    ver_keep = collections.OrderedDict()

    # Sort the CVs so that composites are considered first
    cv_results = sorted(cvs['results'], key=lambda k: k[u'composite'], reverse=True)

    for cv_result in cv_results:
        # We will never clean the DOV
        if cv_result['name'] != "Default Organization View":
            # Handle specific includes
            if cleanup_list:
                # The list contains dictionaries as elements. Process each dictionary
                for cv in cleanup_list:
                    # If the CV name does not appear in our config list, skip
                    if cv['view'] != cv_result['name']:
                        msg = "Skipping " + cv_result['name']
                        helpers.log_msg(msg, 'DEBUG')
                        continue
                    else:
                        msg = "Processing content view '" + cv_result['name'] + "' " \
                            + str(cv_result['id'])
                        helpers.log_msg(msg, 'DEBUG')

                        # Add the next version of the view, and how many versions to keep
                        ver_list[cv_result['id']] = cv_result['id']
                        ver_descr[cv_result['id']] = cv_result['name']
                        ver_keep[cv_result['id']] = cv['keep']

            # Handle the 'all' option
            else:
                msg = "Processing content view '" + cv_result['name'] + "' " \
                    + str(cv_result['id'])
                helpers.log_msg(msg, 'DEBUG')

                # Add the next version of the view, and how many versions to keep
                ver_list[cv_result['id']] = cv_result['id']
                ver_descr[cv_result['id']] = cv_result['name']
                ver_keep[cv_result['id']] = keep


    return ver_list, ver_descr, ver_keep
Example #53
def check_disk_space(export_type, unattended):
    """
    Check the disk usage of the pulp partition
    For a full export we need at least 50% free, as we spool to /var/lib/pulp.
    """
    pulp_used = str(helpers.disk_usage('/var/lib/pulp'))
    if export_type == 'full' and int(float(pulp_used)) > 50:
        msg = "Insufficient space in /var/lib/pulp for a full export. >50% free space is required."
        helpers.log_msg(msg, 'WARNING')
        if not unattended:
            answer = helpers.query_yes_no("Continue with export?", "no")
            if not answer:
                msg = "Export Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Export continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(3)
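# helpers.disk_usage is not shown in these examples; below is a hedged sketch of
# what it might look like, assuming it returns the percentage of space used on the
# filesystem holding the given path (implemented here with os.statvfs for illustration).
def disk_usage(path):
    """Return the percentage of disk space used on the filesystem containing path."""
    stat = os.statvfs(path)
    total = stat.f_blocks * stat.f_frsize
    free = stat.f_bavail * stat.f_frsize
    return ((total - free) * 100.0) / total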
def get_cv(org_id, cleanup_list, keep):
    """Get the content views"""

    # Query API to get all content views for our org
    cvs = helpers.get_json(
        helpers.KATELLO_API + "organizations/" + str(org_id) + "/content_views/")
    ver_list = collections.OrderedDict()
    ver_descr = collections.OrderedDict()
    ver_keep = collections.OrderedDict()

    # Sort the CVs so that composites are considered first
    cv_results = sorted(cvs['results'], key=lambda k: k[u'composite'], reverse=True)

    for cv_result in cv_results:
        # We will never clean the DOV
        if cv_result['name'] != "Default Organization View":
            # Handle specific includes
            if cleanup_list:
                # The list contains dictionaries as elements. Process each dictionary
                for cv in cleanup_list:
                    # If the CV name does not appear in our config list, skip
                    if cv['view'] != cv_result['name']:
                        msg = "Skipping " + cv_result['name']
                        helpers.log_msg(msg, 'DEBUG')
                        continue
                    else:
                        msg = "Processing content view '" + cv_result['name'] + "' " \
                            + str(cv_result['id'])
                        helpers.log_msg(msg, 'DEBUG')

                        # Add the next version of the view, and how many versions to keep
                        ver_list[cv_result['id']] = cv_result['id']
                        ver_descr[cv_result['id']] = cv_result['name']
                        ver_keep[cv_result['id']] = cv['keep']

            # Handle the 'all' option
            else:
                msg = "Processing content view '" + cv_result['name'] + "' " \
                    + str(cv_result['id'])
                helpers.log_msg(msg, 'DEBUG')

                # Add the next version of the view, and how many versions to keep
                ver_list[cv_result['id']] = cv_result['id']
                ver_descr[cv_result['id']] = cv_result['name']
                ver_keep[cv_result['id']] = keep


    return ver_list, ver_descr, ver_keep
Example #55
def create_tar(export_dir, export_path):
    """
    Create a TAR of the content we have exported
    Creates a single tar, then splits into DVD size chunks and calculates
    sha256sum for each chunk.
    """
    today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    msg = "Creating TAR files..."
    helpers.log_msg(msg, 'INFO')
    print msg

    os.chdir(export_dir)
    full_tarfile = helpers.EXPORTDIR + '/puppet_export_' + today
    short_tarfile = 'puppet_export_' + today
    with tarfile.open(full_tarfile, 'w') as archive:
        archive.add(os.curdir, recursive=True)

    # Get a list of all the RPM content we are exporting
    result = [
        y for x in os.walk(export_dir)
        for y in glob(os.path.join(x[0], '*.tar.gz'))
    ]
    if result:
        f_handle = open(helpers.LOGDIR + '/puppet_export_' + today + '.log',
                        'a+')
        f_handle.write('-------------------\n')
        for module in result:
            m_module = os.path.join(*(module.split(os.path.sep)[4:]))
            f_handle.write(m_module + '\n')
        f_handle.close()

    # When we've tar'd up the content we can delete the export dir.
    os.chdir(helpers.EXPORTDIR)
    shutil.rmtree(export_path)

    # Split the resulting tar into DVD size chunks & remove the original.
    msg = "Splitting TAR file..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system("split -d -b 4200M " + full_tarfile + " " + full_tarfile + "_")
    os.remove(full_tarfile)

    # Temporary until pythonic method is done
    msg = "Calculating Checksums..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('sha256sum ' + short_tarfile + '_* > ' + short_tarfile +
              '.sha256')
Example #56
def create_tar(export_dir, name):
    """
    Create a TAR of the content we have exported
    Creates a single tar, then splits into DVD size chunks and calculates
    sha256sum for each chunk.
    """
    today = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    msg = "Creating TAR files..."
    helpers.log_msg(msg, 'INFO')
    print msg

    os.chdir(export_dir)
    full_tarfile = helpers.EXPORTDIR + '/sat6_export_' + today + '_' + name
    short_tarfile = 'sat6_export_' + today + '_' + name
    with tarfile.open(full_tarfile, 'w') as archive:
        archive.add(os.curdir, recursive=True)

    # Get a list of all the RPM content we are exporting
    result = [y for x in os.walk(export_dir) for y in glob(os.path.join(x[0], '*.rpm'))]
    if result:
        f_handle = open(helpers.LOGDIR + '/export_' + today + '_' + name + '.log', 'a+')
        f_handle.write('-------------------\n')
        for rpm in result:
            m_rpm = os.path.join(*(rpm.split(os.path.sep)[6:]))
            f_handle.write(m_rpm + '\n')
        f_handle.close()

    # When we've tar'd up the content we can delete the export dir.
    os.chdir(helpers.EXPORTDIR)
    shutil.rmtree(export_dir)
    if os.path.exists(helpers.EXPORTDIR + "/iso"):
        shutil.rmtree(helpers.EXPORTDIR + "/iso")

    # Split the resulting tar into DVD size chunks & remove the original.
    msg = "Splitting TAR file..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system("split -d -b 4200M " + full_tarfile + " " + full_tarfile + "_")
    os.remove(full_tarfile)

    # Temporary until pythonic method is done
    msg = "Calculating Checksums..."
    helpers.log_msg(msg, 'INFO')
    print msg
    os.system('sha256sum ' + short_tarfile + '_* > ' + short_tarfile + '.sha256')
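# As with the checksum step, the DVD-sized split is shelled out to the 'split'
# command. A hedged pure-Python alternative is sketched below; the 4200MB chunk
# size and the _00/_01 suffixes mirror the command above. Illustrative only.
def split_file(full_tarfile, chunk_size=4200 * 1024 * 1024, block=1024 * 1024):
    """Split full_tarfile into numbered chunks of chunk_size bytes, then remove it."""
    part = 0
    with open(full_tarfile, 'rb') as source:
        data = source.read(block)
        while data:
            with open(full_tarfile + '_%02d' % part, 'wb') as target:
                written = 0
                # Copy block by block until this chunk is full or the source is exhausted
                while data and written < chunk_size:
                    target.write(data)
                    written += len(data)
                    data = source.read(block)
            part += 1
    os.remove(full_tarfile)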
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Cleans content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-k', '--keep', help='How many old versions to keep (only used with -a)',
        required=False)
    group.add_argument('-a', '--all', help='Clean ALL content views', required=False,
        action="store_true")
    parser.add_argument('-c', '--cleanall', help='Remove orphan versions between in-use views',
        required=False, action="store_true")
    parser.add_argument('-i', '--ignorefirstpromoted', help='Version to keep count starts from first CV, not first promoted CV',
        required=False, action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be cleaned',
        required=False, action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view cleanup started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun
    cleanall = args.cleanall
    ignorefirstpromoted = args.ignorefirstpromoted
    if args.keep:
        keep = args.keep
    else:
        keep = "0"

    cleanup_list = []
    if not args.all:
        cleanup_list = helpers.CONFIG['cleanup']['content_views']

        if not cleanup_list:
            msg = "Cannot find cleanup configuration"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

        msg = "Config found for CV's " + str(cleanup_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_keep) = get_cv(org_id, cleanup_list, keep)

    # Clean the content views. Returns a list of task IDs.
    cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall, ignorefirstpromoted)

    # Exit cleanly
    sys.exit(0)
def check_running_tasks(clear):
    """
    Check for any currently running Sync tasks
    Checks for any Synchronize tasks in running/paused or Incomplete state.
    """
    #pylint: disable-msg=R0912,R0914,R0915
    # Clear the screen
    if clear:
        os.system('clear')

    print helpers.HEADER + "Checking for running/paused yum sync tasks..." + helpers.ENDC
    tasks = helpers.get_p_json(
        helpers.FOREMAN_API + "tasks/", \
            json.dumps(
                {
                    "per_page": "100",
                }
            ))

    # From the list of tasks, look for any running export or sync jobs.
    # If we have any, we exit, as we can't export in this state.
    running_sync = 0
    for task_result in tasks['results']:
        if task_result['state'] == 'running' and task_result['label'] != 'Actions::BulkAction':
            if task_result['humanized']['action'] == 'Synchronize':
                running_sync = 1
                print helpers.BOLD + "Running: " + helpers.ENDC \
                    + task_result['input']['repository']['name']
        if task_result['state'] == 'paused' and task_result['label'] != 'Actions::BulkAction':
            if task_result['humanized']['action'] == 'Synchronize':
                running_sync = 1
                print helpers.ERROR + "Paused:  " + helpers.ENDC \
                    + task_result['input']['repository']['name']

    if not running_sync:
        print helpers.GREEN + "None detected" + helpers.ENDC


    # Check any repos marked as Sync Incomplete
    print helpers.HEADER + "\nChecking for incomplete (stopped) yum sync tasks..." + helpers.ENDC
    repo_list = helpers.get_json(
        helpers.KATELLO_API + "/content_view_versions")

    # Extract the list of repo ids, then check the state of each one.
    incomplete_sync = 0
    for repo in repo_list['results']:
        for repo_id in repo['repositories']:
            repo_status = helpers.get_json(
                helpers.KATELLO_API + "/repositories/" + str(repo_id['id']))

            if repo_status['content_type'] == 'yum':
                if repo_status['last_sync'] is None:
                    if repo_status['library_instance_id'] is None:
#                        incomplete_sync = 1
#                        print helpers.ERROR + "Broken Repo: " + helpers.ENDC + repo_status['name']
                        print helpers.WARNING + "Never Synchronized: " + helpers.ENDC + repo_status['name']
                elif repo_status['last_sync']['state'] == 'stopped':
                    if repo_status['last_sync']['result'] == 'warning':
                        incomplete_sync = 1
                        print helpers.WARNING + "Incomplete: " + helpers.ENDC + repo_status['name']
                    else:
                        msg = repo_status['name'] + " - last_sync: " + repo_status['last_sync']['ended_at']
                        helpers.log_msg(msg, 'DEBUG')

    # If we have detected incomplete sync tasks, ask the user if they want to export anyway.
    # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected satellite.
    if not incomplete_sync:
        print helpers.GREEN + "No incomplete syncs detected\n" + helpers.ENDC
    else:
        print "\n"

    # Exit the loop if both tests are clear
    if not running_sync and not incomplete_sync:
        sys.exit(0)
def cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall, ignorefirstpromoted):
    """Clean Content Views"""

    # Set the task name to be displayed in the task monitoring stage
    task_name = "Cleanup content views"

    # Now we have all the info needed, we can actually trigger the cleanup.
    task_list = []
    ref_list = {}

    # Catch scenario that no CV versions are found matching cleanup criteria
    if not ver_list:
        msg = "No content view versions found matching cleanup criteria"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    for cvid in ver_list.keys():
        msg = "Cleaning content view '" + str(ver_descr[cvid]) + "'"
        helpers.log_msg(msg, 'INFO')
        print helpers.HEADER + msg + helpers.ENDC

        # Check if there is a publish/promote already running on this content view
        locked = helpers.check_running_publish(ver_list[cvid], ver_descr[cvid])
        if locked:
            continue

        # For the given content view we need to find the orphaned versions
        cvinfo = get_content_view_info(cvid)

        # Find the oldest published version
        version_list = []
        orphan_versions = []
        orphan_dict = {}
        all_versions = []
        ccv_versions = []
        for version in cvinfo['versions']:

            # Check if the version is part of a published view.
            # This is not returned in cvinfo, and we need to see if we are part of a CCV
            version_in_use, version_in_ccv = check_version_views(version['id'])

            # Build a list of ALL version numbers
            all_versions.append(float(version['version']))
            # Add any version numbers that are part of a CCV to a list
            if version_in_ccv:
                ccv_versions.append(float(version['version']))
            if not version['environment_ids']:
                # These are the versions that don't belong to an environment (i.e. orphans)
                # We also cross-check for versions that may be in a CCV here.
                # We add the version name and id into a dictionary so we can delete by id.
                if not version_in_use:
                    orphan_versions.append(float(version['version']))
                    orphan_dict[version['version']] = version['id']
                    continue
            else:
                msg = "Found version " + str(version['version'])
                helpers.log_msg(msg, 'DEBUG')
                # Add the version id to a list
                version_list.append(float(version['version']))

        # Find the oldest 'in use' version id
        if not version_list:
            msg = "No oldest in-use version found"
            lastver = None
        else:
            lastver = min(version_list)
            msg = "Oldest in-use version is " + str(lastver)
        helpers.log_msg(msg, 'DEBUG')

        # Find the oldest 'NOT in use' version id
        if not orphan_versions:
            msg = "No oldest NOT-in-use version found"
        else:
            msg = "Oldest NOT-in-use version is " + str(min(orphan_versions))
        helpers.log_msg(msg, 'DEBUG')

        # Find the element position in the all_versions list of the oldest in-use version
        # e.g. vers 102.0 is oldest in-use and is element [5] in the all_versions list
        if lastver is not None:
            list_position = [i for i, x in enumerate(all_versions) if x == lastver]
            # Remove the number of views to keep from the element position of the oldest in-use
            # e.g. keep=2 results in an adjusted list element position [3]
            num_to_delete = list_position[0] - int(ver_keep[cvid])
        else:
            # No in-use versions exist, so the keep count applies to the orphans directly
            num_to_delete = len(orphan_versions) - int(ver_keep[cvid])
        # Delete from position [0] to the first 'keep' position
        # e.g. first keep element is [3] so list of elements [0, 1, 2] is created
        list_pos_to_delete = [i for i in range(num_to_delete)]

        # Find versions to delete (based on keep parameter)
        # Make sure the version list is in order
        orphan_versions.sort()

        if cleanall:
            # Remove all orphaned versions
            todelete = orphan_versions
        elif ignorefirstpromoted:
            # Remove the last 'keep' elements from the orphans list (from PR #26)
            todelete = orphan_versions[:(len(orphan_versions) - int(ver_keep[cvid]))]
        else:
            todelete = []
            # Remove the element numbers for deletion from the list all versions
            for i in sorted(list_pos_to_delete, reverse=True):
                todelete.append(orphan_versions[i])

        msg = "Versions to remove: " + str(todelete)
        helpers.log_msg(msg, 'DEBUG')

        for version in all_versions:
            if not locked:
                if version in todelete:
                    msg = "Orphan view version " + str(version) + " found in '" +\
                        str(ver_descr[cvid]) + "'"
                    helpers.log_msg(msg, 'DEBUG')

                    # Lookup the version_id from our orphan_dict
                    delete_id = orphan_dict.get(str(version))

                    msg = "Removing version " + str(version)
                    helpers.log_msg(msg, 'INFO')
                    print helpers.HEADER + msg + helpers.ENDC
                else:
                    if version in ccv_versions:
                        msg = "Skipping delete of version " + str(version) + " (member of a CCV)"
                    elif version in orphan_versions:
                        msg = "Skipping delete of version " + str(version) + " (due to keep value)"
                    else:
                        msg = "Skipping delete of version " + str(version) + " (in use)"
                    helpers.log_msg(msg, 'INFO')
                    print msg
                    continue
            else:
                msg = "Version " + str(version) + " is locked"
                helpers.log_msg(msg, 'WARNING')
                continue

            # Delete the view version from the content view
            if not dry_run and not locked:
                try:
                    task_id = helpers.put_json(
                        helpers.KATELLO_API + "content_views/" + str(cvid) + "/remove/",
                        json.dumps(
                            {
                                "id": cvid,
                                "content_view_version_ids": delete_id
                            }
                            ))['id']

                    # Wait for the task to complete
                    helpers.wait_for_task(task_id,'clean')

                    # Check if the deletion completed successfully
                    tinfo = helpers.get_task_status(task_id)
                    if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                        msg = "Removal of content view version OK"
                        helpers.log_msg(msg, 'INFO')
                        print helpers.GREEN + "OK" + helpers.ENDC
                    else:
                        msg = "Failed"
                        helpers.log_msg(msg, 'ERROR')

                except Warning:
                    msg = "Failed to initiate removal"
                    helpers.log_msg(msg, 'WARNING')

                except KeyError:
                    msg = "Failed to initiate removal (KeyError)"
                    helpers.log_msg(msg, 'WARNING')

    # Exit in the case of a dry-run
    if dry_run:
        msg = "Dry run - not actually performing removal"
        helpers.log_msg(msg, 'WARNING')
        sys.exit(2)
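# A small worked example of the keep arithmetic in cleanup(), using assumed sample
# data: versions 1.0-4.0 are orphans, the oldest in-use version is 5.0 and keep is 2,
# so only the two oldest orphans are removed.
all_versions = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
orphan_versions = [1.0, 2.0, 3.0, 4.0]
lastver = 5.0
keep = 2
list_position = [i for i, x in enumerate(all_versions) if x == lastver]  # [4]
num_to_delete = list_position[0] - keep                                  # 4 - 2 = 2
todelete = [orphan_versions[i] for i in range(num_to_delete)]            # [1.0, 2.0]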
Example #60
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Puppet export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Puppet modules.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    group.add_argument('-a', '--all', help='Export ALL puppet modules', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of puppet modules since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export puppet modules since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time

    # Get the last export date. If we're exporting all, this isn't relevant
    # If we are given a start date, use that, otherwise we need to get the last date from file
    # If there is no last export, we'll set an arbitrary start date to grab everything (2000-01-01)
    last_export = read_timestamp()
    export_type = 'incr'
    if args.all:
        print "Performing full puppet module export"
        export_type = 'full'
    else:
        if not since:
            if args.last:
                if last_export:
                    print "Last successful export was started at " + last_export
                else:
                    print "Export has never been performed"
                sys.exit(-1)
            if not last_export:
                print "No previous export recorded, performing full puppet module export"
                export_type = 'full'
        else:
            last_export = str(since)

            # We have our timestamp so we can kick of an incremental export
            print "Incremental export of puppet modules synchronised after " + last_export

    # TODO: Remove any previous exported content
#    os.chdir(helpers.EXPORTDIR)
#    shutil.rmtree()

    # Check if there are any currently running tasks that will conflict with an export
    check_running_tasks()

    # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
    export_puppet(last_export, export_type)

    # Now we need to process the on-disk export data
    # Find the name of our export dir. This ASSUMES that the export dir is the ONLY dir.
    sat_export_dir = os.walk(helpers.EXPORTDIR).next()[1]
    export_path = sat_export_dir[0]

    # This portion finds the full directory tree of the Puppet repo, starting at the level
    # containing the Org_Name (/var/lib/pulp/published/puppet/http/repos/<org_name>/...)
    # pylint: disable=unused-variable
    for dirpath, subdirs, files in os.walk(helpers.EXPORTDIR):
        for tdir in subdirs:
            if org_name in tdir:
                export_dir = os.path.join(dirpath, tdir)

    # Add our exported data to a tarfile
    create_tar(export_dir, export_path)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    write_timestamp(start_time)

    # And we're done!
    print helpers.GREEN + "Puppet module export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected puppet-forge server content location.\n'