Example #1
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-e',
                        '--env',
                        help='Environment config file',
                        required=False)
    group.add_argument('-a',
                       '--all',
                       help='Export ALL content',
                       required=False,
                       action="store_true")
    group.add_argument('-i',
                       '--incr',
                       help='Incremental Export of content since last run',
                       required=False,
                       action="store_true")
    group.add_argument('-s',
                       '--since',
                       help='Export content since YYYY-MM-DD HH:MM:SS',
                       required=False,
                       type=helpers.valid_date)
    parser.add_argument('-l',
                        '--last',
                        help='Display time of last export',
                        required=False,
                        action="store_true")
    parser.add_argument('-n',
                        '--nogpg',
                        help='Skip GPG checking',
                        required=False,
                        action="store_true")
    parser.add_argument('-r',
                        '--repodata',
                        help='Include repodata for repos with no new packages',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    if args.env:
        repocfg = os.path.join(dir, 'config/' + args.env + '.yml')
        if not os.path.exists(repocfg):
            print "ERROR: Config file " + repocfg + " not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open(repocfg, 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(),
                                            '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
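    # Assumption (based on how export_times is written back at the end of this
    # routine): read_pickle() returns a dict keyed by repo label (or 'DoV'),
    # mapping to the 'YYYY-MM-DD HH:MM:SS' start time of the last successful
    # export, e.g. {'DoV': '2016-01-01 12:00:00'}. An empty/missing pickle is
    # treated as "never exported" and triggers a full export below.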
    export_type = 'incr'

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        repo = "{:<70}".format(time)
                        print repo[:70] + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
            + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))
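    # Assumption about the API response shape, based on how repolist is used
    # below: a dict with a 'results' list, where each entry carries at least
    # 'id', 'label', 'content_type' ('yum'/'file'), 'relative_path' and
    # 'product' -> 'cp_id'.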

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        cola = "Exporting DoV"
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            colb = "(INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
            colb = "(FULL)"
        msg = cola + " " + colb
        helpers.log_msg(msg, 'INFO')
        output = "{:<70}".format(cola)
        print output[:70] + ' ' + colb

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in Satellite
        for repo in erepos:
            repo_in_sat = False
            for repo_x in repolist['results']:
                if re.findall("\\b" + repo + "\\b$", repo_x['label']):
                    repo_in_sat = True
                    break
            if not repo_in_sat:
                msg = "'" + repo + "' not found in Satellite"
                helpers.log_msg(msg, 'WARNING')

        # Process each repo
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result[
                            'label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'],
                                                       ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export,
                                                export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo[
                                'result'] == 'success':
                            # Count the number of exported packages
                            # First resolve the product label - this forms part of the export path
                            product = get_product(
                                org_id, repo_result['product']['cp_id'])
                            # Now we can build the export path itself
                            basepath = helpers.EXPORTDIR + "/" + org_name + "-" + product + "-" + repo_result[
                                'label']
                            if export_type == 'incr':
                                basepath = basepath + "-incremental"
                            exportpath = basepath + "/" + repo_result[
                                'relative_path']
                            msg = "\nExport path = " + exportpath
                            helpers.log_msg(msg, 'DEBUG')

                            os.chdir(exportpath)
                            numrpms = len([
                                f for f in os.walk(".").next()[2]
                                if f[-4:] == ".rpm"
                            ])
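                            # Note: os.walk(".").next()[2] lists only the files
                            # directly inside exportpath, so RPMs in any
                            # subdirectories are not included in this count.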

                            msg = "Repository Export OK (" + str(
                                numrpms) + " new packages)"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            if numrpms != 0 or args.repodata:
                                msg = "Adding " + repo_result[
                                    'label'] + " to export list"
                                helpers.log_msg(msg, 'DEBUG')
                                exported_repos.append(repo_result['label'])
                            else:
                                msg = "Not including repodata for empty repo " + repo_result[
                                    'label']
                                helpers.log_msg(msg, 'DEBUG')

                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            # Handle FILE type exports (ISO repos)
            elif repo_result['content_type'] == 'file':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result[
                            'label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00'  # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'],
                                                       ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_iso(repo_result['id'],
                                              repo_result['label'],
                                              repo_result['relative_path'],
                                              last_export, export_type)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time

                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result[
                                'label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result[
                                'label']
                            helpers.log_msg(msg, 'DEBUG')

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    if not args.nogpg:
        do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open(vardir + '/exports_' + ename + '.pkl',
                                   "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
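
# A minimal usage sketch for the export routine above (the script name
# 'sat_export.py' and the environment name 'MyEnv' are hypothetical; the flags
# come from the argparse definition in the code):
#   ./sat_export.py -a                          # full export of the Default Org View
#   ./sat_export.py -e MyEnv -i                 # incremental export of repos in config/MyEnv.yml
#   ./sat_export.py -s '2016-01-01 00:00:00'    # export content synchronised since a given date
#   ./sat_export.py -l                          # show the time of the last successful export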
Example #2
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Publishes content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Publish ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be published',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    if not args.last:
        msg = "-------- Content view publish started by " + runuser + " -----------"
        helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))
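    # phistory is a simple {lifecycle_environment_name: 'YYYY-MM-DD'} map of the
    # last recorded publish/promotion, as written by the pickle.dump() further down.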

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)


    publish_list = []
    if not args.all:
        publish_list = helpers.CONFIG['publish']['content_views']

        if not publish_list:
            msg = "Cannot find publish configuration"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

        msg = "Config found for CV's " + str(publish_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, publish_list)

    # Publish the content views. Returns a list of task IDs.
    (task_list, ref_list, task_name) = publish(ver_list, ver_descr, ver_version, dry_run, runuser)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory['Library'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Monitor the status of the publish tasks
    helpers.watch_tasks(task_list, ref_list, task_name)

    # Exit cleanly
    sys.exit(0)
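
# A minimal usage sketch for the publish routine above (the script name
# 'publish_content_views.py' is hypothetical; the flags come from the argparse
# definition in the code):
#   ./publish_content_views.py -a       # publish ALL content views
#   ./publish_content_views.py -d       # dry run against the configured content view list
#   ./publish_content_views.py -l       # display last recorded promotions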
Example #3
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Exports puppet modules in puppet-forge-server format.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-r',
                        '--repo',
                        help='Puppetforge repo label',
                        required=False)
    parser.add_argument(
        '-t',
        '--type',
        help='Puppetforge server type (puppet-forge-server|artifactory)',
        required=False)
    parser.add_argument('-s',
                        '--server',
                        help='puppet-forge-server hostname',
                        required=False)
    parser.add_argument('-m',
                        '--modulepath',
                        help='path to puppet-forge-server modules',
                        required=False)
    parser.add_argument(
        '-u',
        '--user',
        help=
        'Username to push modules to server as (default is user running script)',
        required=False)
    parser.add_argument(
        '-p',
        '--password',
        help='Password (token) for username to push modules to Artifactory',
        required=False)
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME

    # Define the type of puppet-forge server
    if args.type:
        pftype = args.type
    else:
        if not helpers.PFMETHOD:
            print "Puppet forge server type not specified"
            sys.exit(1)
        else:
            pftype = helpers.PFMETHOD

    # Define the puppet-forge-server hostname
    if args.server:
        pfserver = args.server
    else:
        if not helpers.PFSERVER:
            print "Puppet forge server not defined"
            sys.exit(1)
        else:
            pfserver = helpers.PFSERVER

    # Set the remote (puppet-forge-server) modules directory
    if args.modulepath:
        modpath = args.modulepath
    else:
        if not helpers.PFMODPATH:
            print "Puppet forge module path not defined"
            sys.exit(1)
        else:
            modpath = helpers.PFMODPATH

    # Set the username to use to push modules
    if args.user:
        pfuser = args.user
    else:
        pfuser = helpers.PFUSER

    # Read in the token for Artifactory
    if args.password:
        pftoken = args.password
    else:
        pftoken = helpers.PFTOKEN

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Read the repo label given by the user
    if args.repo:
        pfrepo = args.repo
    else:
        print "Puppetforge repo not defined"
        sys.exit(1)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))

    # Process each repo
    for repo_result in repolist['results']:
        if repo_result['content_type'] == 'puppet':
            # If we have a match, do the export
            if repo_result['label'] == pfrepo:

                # Trigger export on the repo
                numfiles = export_puppet(repo_result['id'],
                                         repo_result['label'],
                                         repo_result['relative_path'], 'full')

            else:
                msg = "Skipping  " + repo_result['label']
                helpers.log_msg(msg, 'DEBUG')

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/puppetforge"

    if pftype == 'puppet-forge-server':
        # Method for posting to puppet-forge-server
        os.chdir(script_dir)
        copy_to_pfserver(export_dir, pfserver, modpath, pfuser)

    elif pftype == 'artifactory':
        # Method for posting to Artifactory repository
        for module in os.listdir(export_dir):
            print("Posing: " + module)
            postModule(module, export_dir, pfserver, modpath, pfuser, pftoken)

    else:
        print("Unknown puppet-forge server type defined")
        sys.exit(1)

    # And we're done!
    print helpers.GREEN + "Puppet Forge export complete.\n" + helpers.ENDC
    sys.exit(0)
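
# A minimal usage sketch for the puppet-forge push routine above (the script
# name 'push_puppetforge.py', the repo label and the hostname are hypothetical;
# the flags come from the argparse definition in the code):
#   ./push_puppetforge.py -r Puppet_Forge -t puppet-forge-server -s forge.example.org
#   ./push_puppetforge.py -r Puppet_Forge -t artifactory -u deployuser -p <api_token>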
Example #4
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-d', '--dataset', \
        help='Date/name of Import dataset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n',
                        '--nosync',
                        help='Do not trigger a sync after extracting content',
                        required=False,
                        action="store_true")
    parser.add_argument('-r',
                        '--remove',
                        help='Remove input files after import has completed',
                        required=False,
                        action="store_true")
    parser.add_argument('-l',
                        '--last',
                        help='Display the last successful import performed',
                        required=False,
                        action="store_true")
    parser.add_argument('-c',
                        '--count',
                        help='Display all package counts after import',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dataset = args.dataset

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Display the last successful import
    if args.last:
        if os.path.exists(vardir + '/imports.pkl'):
            last_import = pickle.load(open(vardir + '/imports.pkl', 'rb'))
            msg = "Last successful import was " + last_import
            helpers.log_msg(msg, 'INFO')
            print msg
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print msg
        sys.exit(0)

    # If we got this far without -d being specified, error out cleanly
    if args.dataset is None:
        parser.error("--dataset is required")

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(dataset)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR +
              "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Trigger a sync of the content into the Library
    if args.nosync:
        #print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = True
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))
        package_count = pickle.load(open('package_count.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override) = sync_content(org_id, imported_repos)

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

        # Verify the repository package/erratum counts match the sync host
        check_counts(org_id, package_count, args.count)

    if os.path.exists(helpers.IMPORTDIR + '/puppetforge'):
        print 'Offline puppet-forge-server bundle is available to import separately in '\
            + helpers.IMPORTDIR + '/puppetforge\n'

    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print msg
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + dataset +
                  "*")
        os.system("rm -rf " + helpers.IMPORTDIR +
                  "/{content,custom,listing,*.pkl}")
        excode = 0
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 2
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 0

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    pickle.dump(dataset, open(vardir + '/imports.pkl', "wb"))

    # And exit.
    sys.exit(excode)
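
# A minimal usage sketch for the import routine above (the script name
# 'sat_import.py' and the dataset name are hypothetical; the flags come from
# the argparse definition in the code):
#   ./sat_import.py -d 2016-01-01_MyEnv        # extract the dataset and sync the imported repos
#   ./sat_import.py -d 2016-01-01_MyEnv -n     # extract only, skip the repository sync
#   ./sat_import.py -l                         # show the last successful import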
Example #5
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Puppet export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Export of Puppet modules.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    group.add_argument('-a',
                       '--all',
                       help='Export ALL puppet modules',
                       required=False,
                       action="store_true")
    group.add_argument(
        '-i',
        '--incr',
        help='Incremental Export of puppet modules since last run',
        required=False,
        action="store_true")
    group.add_argument('-s',
                       '--since',
                       help='Export puppet modules since YYYY-MM-DD HH:MM:SS',
                       required=False,
                       type=helpers.valid_date)
    parser.add_argument('-l',
                        '--last',
                        help='Display time of last export',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(),
                                            '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time

    # Get the last export date. If we're exporting all, this isn't relevant
    # If we are given a start date, use that, otherwise we need to get the last date from file
    # If there is no last export, we'll set an arbitrary start date to grab everything (2000-01-01)
    last_export = read_timestamp()
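    # Assumption: read_timestamp()/write_timestamp() persist a single
    # 'YYYY-MM-DD HH:MM:SS' string (the start time of the last successful
    # export); a falsy return value is treated as "never exported" below.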
    export_type = 'incr'
    if args.all:
        print "Performing full puppet module export"
        export_type = 'full'
    else:
        if not since:
            if args.last:
                if last_export:
                    print "Last successful export was started at " + last_export
                else:
                    print "Export has never been performed"
                sys.exit(-1)
            if not last_export:
                print "No previous export recorded, performing full puppet module export"
                export_type = 'full'
        else:
            last_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of puppet modules synchronised after " + last_export

    # TODO: Remove any previous exported content
    #    os.chdir(helpers.EXPORTDIR)
    #    shutil.rmtree()

    # Check if there are any currently running tasks that will conflict with an export
    check_running_tasks()

    # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
    export_puppet(last_export, export_type)

    # Now we need to process the on-disk export data
    # Find the name of our export dir. This ASSUMES that the export dir is the ONLY dir.
    sat_export_dir = os.walk(helpers.EXPORTDIR).next()[1]
    export_path = sat_export_dir[0]

    # This portion finds the full directory tree of the Puppet repo, starting at the level
    # containing the Org_Name (/var/lib/pulp/published/puppet/http/repos/<org_name>/...)
    # pylint: disable=unused-variable
    for dirpath, subdirs, files in os.walk(helpers.EXPORTDIR):
        for tdir in subdirs:
            if org_name in tdir:
                export_dir = os.path.join(dirpath, tdir)

    # Add our exported data to a tarfile
    create_tar(export_dir, export_path)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    write_timestamp(start_time)

    # And we're done!
    print helpers.GREEN + "Puppet module export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected puppet-forge server content location.\n'
Example #6
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)', 
        required=False)
    parser.add_argument('-d', '--date', \
        help='Date/name of Import fileset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n', '--nosync', help='Do not trigger a sync after extracting content',
        required=False, action="store_true")
    parser.add_argument('-r', '--remove', help='Remove input files after import has completed',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display the last successful import performed', 
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    expdate = args.date

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Display the last successful import
    if args.last:
        if os.path.exists(vardir + '/imports.pkl'):
            last_import = pickle.load(open(vardir + '/imports.pkl', 'rb'))
            msg = "Last successful import was " + last_import
            helpers.log_msg(msg, 'INFO')
            print msg
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print msg
        sys.exit(-1)
             
    # If we got this far without -d being specified, error out cleanly
    if args.date is None:
        parser.error("--date is required")


    # Figure out if we have the specified input fileset
    basename = get_inputfiles(expdate)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Trigger a sync of the content into the Library
    if args.nosync:
        #print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = True
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override) = sync_content(org_id, imported_repos)

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print msg
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + expdate + "*")
        os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print msg
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print msg

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    pickle.dump(expdate, open(vardir + '/imports.pkl', "wb"))
Example #7
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    parser.add_argument('-e', '--env', help='Environment config file', required=False)
    group.add_argument('-a', '--all', help='Export ALL content', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of content since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export content since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    if args.env:
        if not os.path.exists('config/' + args.env + '.yml'):
            print "ERROR: Config file 'config/" + args.env + ".yml' not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open("config/" + args.env + ".yml", 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        print str(time) + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # TODO: Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
            + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # TODO: Remove any previous exported content
#    os.chdir(helpers.EXPORTDIR)
#    shutil.rmtree()


    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            msg = "Exporting DoV (INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
            msg = "Exporting DoV (FULL)"
        helpers.log_msg(msg, 'INFO')
        print msg

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in our DoV
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        msg = "Exporting " + repo_result['label'] \
                            + " (INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        msg = "Exporting " + repo_result['label'] + "(FULL)"
                    helpers.log_msg(msg, 'INFO')
                    print msg

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export, export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                            msg = "Repository Export OK"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')


                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')


    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open('var/exports_' + ename + '.pkl', "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
Example #8
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Puppet export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Puppet modules.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    group.add_argument('-a', '--all', help='Export ALL puppet modules', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of puppet modules since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export puppet modules since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time

    # Get the last export date. If we're exporting all, this isn't relevant
    # If we are given a start date, use that, otherwise we need to get the last date from file
    # If there is no last export, we'll set an arbitrary start date to grab everything (2000-01-01)
    last_export = read_timestamp()
    export_type = 'incr'
    if args.all:
        print "Performing full puppet module export"
        export_type = 'full'
    else:
        if not since:
            if args.last:
                if last_export:
                    print "Last successful export was started at " + last_export
                else:
                    print "Export has never been performed"
                sys.exit(-1)
            if not last_export:
                print "No previous export recorded, performing full puppet module export"
                export_type = 'full'
        else:
            last_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of puppet modules synchronised after " + last_export

    # TODO: Remove any previous exported content
#    os.chdir(helpers.EXPORTDIR)
#    shutil.rmtree()

    # Check if there are any currently running tasks that will conflict with an export
    check_running_tasks()

    # Now we have a CV ID and a starting date, and no conflicting tasks, we can export
    export_puppet(last_export, export_type)

    # Now we need to process the on-disk export data
    # Find the name of our export dir. This ASSUMES that the export dir is the ONLY dir.
    sat_export_dir = os.walk(helpers.EXPORTDIR).next()[1]
    export_path = sat_export_dir[0]

    # This portion finds the full directory tree of the Puppet repo, starting at the level
    # containing the Org_Name (/var/lib/pulp/published/puppet/http/repos/<org_name>/...)
    # pylint: disable=unused-variable
    for dirpath, subdirs, files in os.walk(helpers.EXPORTDIR):
        for tdir in subdirs:
            if org_name in tdir:
                export_dir = os.path.join(dirpath, tdir)

    # Add our exported data to a tarfile
    create_tar(export_dir, export_path)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    write_timestamp(start_time)

    # And we're done!
    print helpers.GREEN + "Puppet module export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected puppet-forge server content location.\n'
Example #9
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Promotes content views for specified organization to the target environment.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-e', '--env', help='Target Environment (e.g. Development, Quality, Production)',
        required=False)
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Promote ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be promoted',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates", required=False,
        action="store_true")
    parser.add_argument('-m', '--forcemeta', help="Force metadata regeneration", required=False,
        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view promotion started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    target_env = args.env
    dry_run = args.dryrun

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    # Error if no environment to promote to is given
    if args.env is None:
        parser.error('--env is required')

    promote_list = []
    if not args.all:
        for x in helpers.CONFIG['promotion']:
            if x == 'batch':
                continue
            if helpers.CONFIG['promotion'][x]['name'] == target_env:
                promote_list = helpers.CONFIG['promotion'][x]['content_views']

        if not promote_list:
            msg = "Cannot find promotion configuration for '" + target_env + "'"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CV's " + str(promote_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Now, let's fetch all available lifecycle environments for this org...
    (env_list, prior_list) = get_envs(org_id)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, target_env, env_list, prior_list,
        promote_list)

    # Promote to the given environment. Returns a list of task IDs.
    promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run,
        args.quiet, args.forcemeta)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory[target_env] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Promotion completed successfully\n\n" + output
        subject = "Satellite 6 promotion completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
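
The promotion lookup above walks every key of helpers.CONFIG['promotion'] except 'batch' and matches each entry's 'name' against --env. A sketch of the configuration shape this implies; only 'batch', 'name' and 'content_views' come from the code, the other key and content view names are made-up examples:

# Assumed shape of helpers.CONFIG['promotion'] (sketch only)
CONFIG = {
    'promotion': {
        'batch': 10,                  # skipped by the lookup loop; its value is not used here
        'lifecycle1': {               # arbitrary key - one entry per target environment
            'name': 'Quality',        # matched against the --env argument
            'content_views': ['RHEL7_Base', 'RHEL7_Extras'],  # example content view names
        },
    },
}
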
Example #10
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='export GPG keys to defined directory')
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-d',
                        '--dryrun',
                        help='Dry Run - Only show GPG keys',
                        required=False,
                        action="store_true")
    parser.add_argument('-t',
                        '--target',
                        help='Define target directory for keys',
                        required=False)
    parser.add_argument('-p',
                        '--plain',
                        help='Do not create the faked directory structure for Satellite',
                        required=False,
                        action="store_true")

    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    if args.plain:
        plain = args.plain
    else:
        plain = False
    dry_run = args.dryrun

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    gpg_result = get_gpg(org_id)

    # store GPG keys to given export dir
    if not dry_run:
        if args.target:
            targetdir = args.target
            store_gpg(gpg_result, targetdir, plain)
        else:
            parser.print_help()
    else:
        print json.dumps(gpg_result, indent=4, sort_keys=False)

    # Exit cleanly
    sys.exit(0)
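
store_gpg() is not part of this listing, so the following is an entirely hypothetical sketch of what it might do. It assumes get_gpg() returns a dict of products, each mapping key names to key bodies; with --plain the keys are written straight into the target directory, otherwise into one sub-directory per product:

import os

def store_gpg(gpg_result, targetdir, plain):
    # Hypothetical sketch - assumed layout: {product: {key name: key body}}
    for product, keys in gpg_result.items():
        for keyname, keybody in keys.items():
            keydir = targetdir if plain else os.path.join(targetdir, product)
            if not os.path.isdir(keydir):
                os.makedirs(keydir)
            with open(os.path.join(keydir, keyname), 'w') as keyfile:
                keyfile.write(keybody)
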
Example #11
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if helpers.DISCONNECTED:
        msg = "Export cannot be run on the disconnected Satellite host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content export started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Export of Default Content View.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-e', '--env', help='Environment config file', required=False)
    group.add_argument('-a', '--all', help='Export ALL content', required=False,
        action="store_true")
    group.add_argument('-i', '--incr', help='Incremental Export of content since last run',
        required=False, action="store_true")
    group.add_argument('-s', '--since', help='Export content since YYYY-MM-DD HH:MM:SS',
        required=False, type=helpers.valid_date)
    parser.add_argument('-l', '--last', help='Display time of last export', required=False,
        action="store_true")
    parser.add_argument('-n', '--nogpg', help='Skip GPG checking', required=False,
        action="store_true")
    parser.add_argument('-r', '--repodata', help='Include repodata for repos with no new packages', 
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    since = args.since

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)
    exported_repos = []
    # If a specific environment is requested, find and read that config file
    repocfg = os.path.join(dir, 'config/' + args.env + '.yml')
    if args.env:
        if not os.path.exists(repocfg):
            print "ERROR: Config file " + repocfg + " not found."
            sys.exit(-1)
        cfg = yaml.safe_load(open(repocfg, 'r'))
        ename = args.env
        erepos = cfg["env"]["repos"]
        msg = "Specific environment export called for " + ename + ". Configured repos:"
        helpers.log_msg(msg, 'DEBUG')
        for repo in erepos:
            msg = "  - " + repo
            helpers.log_msg(msg, 'DEBUG')

    else:
        ename = 'DoV'
        label = 'DoV'
        msg = "DoV export called"
        helpers.log_msg(msg, 'DEBUG')

    # Get the current time - this will be the 'last export' time if the export is OK
    start_time = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d %H:%M:%S')
    print "START: " + start_time + " (" + ename + " export)"

    # Read the last export date pickle for our selected repo group.
    export_times = read_pickle(ename)
    export_type = 'incr'

    if args.all:
        print "Performing full content export for " + ename
        export_type = 'full'
        since = False
    else:
        if not since:
            since = False
            if args.last:
                if export_times:
                    print "Last successful export for " + ename + ":"
                    for time in export_times:
                        repo = "{:<70}".format(time)
                        print repo[:70] + '\t' + str(export_times[time])
                else:
                    print "Export has never been performed for " + ename
                sys.exit(-1)
            if not export_times:
                print "No prior export recorded for " + ename + ", performing full content export"
                export_type = 'full'
        else:
            # Re-populate export_times dictionary so each repo has 'since' date
            since_export = str(since)

            # We have our timestamp so we can kick off an incremental export
            print "Incremental export of content for " + ename + " synchronised after " \
                + str(since)

    # Check the available space in /var/lib/pulp
    check_disk_space(export_type)

    # Remove any previous exported content left behind by prior unclean exit
    if os.path.exists(helpers.EXPORTDIR + '/export'):
        msg = "Removing existing export directory"
        helpers.log_msg(msg, 'DEBUG')
        shutil.rmtree(helpers.EXPORTDIR + '/export')

    # Collect a list of enabled repositories. This is needed for:
    # 1. Matching specific repo exports, and
    # 2. Running import sync per repo on the disconnected side
    repolist = helpers.get_p_json(
        helpers.KATELLO_API + "/repositories/", \
                json.dumps(
                        {
                           "organization_id": org_id,
                           "per_page": '1000',
                        }
                ))

    # If we are running a full DoV export we run a different set of API calls...
    if ename == 'DoV':
        cola = "Exporting DoV"
        if export_type == 'incr' and 'DoV' in export_times:
            last_export = export_times['DoV']
            if since:
                last_export = since_export
            colb = "(INCR since " + last_export + ")"
        else:
            export_type = 'full'
            last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
            colb = "(FULL)"
        msg = cola + " " + colb
        helpers.log_msg(msg, 'INFO')
        output = "{:<70}".format(cola)
        print output[:70] + ' ' + colb

        # Check if there are any currently running tasks that will conflict with an export
        check_running_tasks(label, ename)

        # Get the version of the CV (Default Org View) to export
        dov_ver = get_cv(org_id)

        # Now that we have a CV ID, a starting date and no conflicting tasks, we can export
        export_id = export_cv(dov_ver, last_export, export_type)

        # Now we need to wait for the export to complete
        helpers.wait_for_task(export_id, 'export')

        # Check if the export completed OK. If not we exit the script.
        tinfo = helpers.get_task_status(export_id)
        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
            msg = "Content View Export OK"
            helpers.log_msg(msg, 'INFO')
            print helpers.GREEN + msg + helpers.ENDC

            # Update the export timestamp for this repo
            export_times['DoV'] = start_time

            # Generate a list of repositories that were exported
            for repo_result in repolist['results']:
                if repo_result['content_type'] == 'yum':
                    # Add the repo to the successfully exported list
                    exported_repos.append(repo_result['label'])

        else:
            msg = "Content View Export FAILED"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)

    else:
        # Verify that defined repos exist in Satellite
        for repo in erepos:
            repo_in_sat = False
            for repo_x in repolist['results']:
                if re.findall("\\b" + repo + "\\b$", repo_x['label']):
                    repo_in_sat = True
                    break
            if not repo_in_sat:
                msg = "'" + repo + "' not found in Satellite"
                helpers.log_msg(msg, 'WARNING')

        # Process each repo
        for repo_result in repolist['results']:
            if repo_result['content_type'] == 'yum':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        export_id = export_repo(repo_result['id'], last_export, export_type)

                        # Now we need to wait for the export to complete
                        helpers.wait_for_task(export_id, 'export')

                        # Check if the export completed OK. If not we exit the script.
                        tinfo = helpers.get_task_status(export_id)
                        if tinfo['state'] != 'running' and tinfo['result'] == 'success':
                            # Count the number of exported packages
                            # First resolve the product label - this forms part of the export path
                            product = get_product(org_id, repo_result['product']['cp_id'])
                            # Now we can build the export path itself
                            basepath = helpers.EXPORTDIR + "/" + org_name + "-" + product + "-" + repo_result['label']
                            if export_type == 'incr':
                                basepath = basepath + "-incremental"
                            exportpath = basepath + "/" + repo_result['relative_path']
                            msg = "\nExport path = " + exportpath
                            helpers.log_msg(msg, 'DEBUG')

                            os.chdir(exportpath)
                            numrpms = len([f for f in os.walk(".").next()[2] if f.endswith(".rpm")])

                            msg = "Repository Export OK (" + str(numrpms) + " new packages)"
                            helpers.log_msg(msg, 'INFO')
                            print helpers.GREEN + msg + helpers.ENDC

                            # Update the export timestamp for this repo
                            export_times[repo_result['label']] = start_time

                            # Add the repo to the successfully exported list
                            if numrpms != 0 or args.repodata:
                                msg = "Adding " + repo_result['label'] + " to export list"
                                helpers.log_msg(msg, 'DEBUG')
                                exported_repos.append(repo_result['label'])
                            else:
                                msg = "Not including repodata for empty repo " + repo_result['label']
                                helpers.log_msg(msg, 'DEBUG')

                        else:
                            msg = "Export FAILED"
                            helpers.log_msg(msg, 'ERROR')

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')

            # Handle FILE type exports (ISO repos)
            elif repo_result['content_type'] == 'file':
                # If we have a match, do the export
                if repo_result['label'] in erepos:
                    # Extract the last export time for this repo
                    orig_export_type = export_type
                    cola = "Export " + repo_result['label']
                    if export_type == 'incr' and repo_result['label'] in export_times:
                        last_export = export_times[repo_result['label']]
                        if since:
                            last_export = since_export
                        colb = "(INCR since " + last_export + ")"
                    else:
                        export_type = 'full'
                        last_export = '2000-01-01 12:00:00' # This is a dummy value, never used.
                        colb = "(FULL)"
                    msg = cola + " " + colb
                    helpers.log_msg(msg, 'INFO')
                    output = "{:<70}".format(cola)
                    print output[:70] + ' ' + colb

                    # Check if there are any currently running tasks that will conflict
                    ok_to_export = check_running_tasks(repo_result['label'], ename)

                    if ok_to_export:
                        # Trigger export on the repo
                        numfiles = export_iso(repo_result['id'], repo_result['label'], repo_result['relative_path'], last_export, export_type)

                        # Reset the export type to the user specified, in case we overrode it.
                        export_type = orig_export_type

                        # Update the export timestamp for this repo
                        export_times[repo_result['label']] = start_time
                        
                        # Add the repo to the successfully exported list
                        if numfiles != 0 or args.repodata:
                            msg = "Adding " + repo_result['label'] + " to export list"
                            helpers.log_msg(msg, 'DEBUG')
                            exported_repos.append(repo_result['label'])
                        else:
                            msg = "Not including repodata for empty repo " + repo_result['label']
                            helpers.log_msg(msg, 'DEBUG')

                else:
                    msg = "Skipping  " + repo_result['label']
                    helpers.log_msg(msg, 'DEBUG')



    # Combine resulting directory structures into a single repo format (top level = /content)
    prep_export_tree(org_name)

    # Now we need to process the on-disk export data.
    # Define the location of our exported data.
    export_dir = helpers.EXPORTDIR + "/export"

    # Write out the list of exported repos. This will be transferred to the disconnected system
    # and used to perform the repo sync tasks during the import.
    pickle.dump(exported_repos, open(export_dir + '/exported_repos.pkl', 'wb'))

    # Run GPG Checks on the exported RPMs
    if not args.nogpg:
        do_gpg_check(export_dir)

    # Add our exported data to a tarfile
    create_tar(export_dir, ename)

    # We're done. Write the start timestamp to file for next time
    os.chdir(script_dir)
    pickle.dump(export_times, open(vardir + '/exports_' + ename + '.pkl', "wb"))

    # And we're done!
    print helpers.GREEN + "Export complete.\n" + helpers.ENDC
    print 'Please transfer the contents of ' + helpers.EXPORTDIR + \
        ' to your disconnected Satellite system content import location.\n' \
        'Once transferred, please run ' + helpers.BOLD + ' sat_import' \
        + helpers.ENDC + ' to extract it.'
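
read_pickle(ename) is the counterpart of the pickle.dump(export_times, ...) call near the end of this routine. A minimal sketch, assuming it simply loads vardir/exports_<ename>.pkl and falls back to an empty dict the first time a repo group is exported:

import os
import pickle

def read_pickle(ename):
    # Sketch only: uses the vardir global set up in main()
    pkl_file = os.path.join(vardir, 'exports_' + ename + '.pkl')
    if not os.path.exists(pkl_file):
        return {}
    return pickle.load(open(pkl_file, 'rb'))
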
Example #12
def main():
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization', required=True)
    parser.add_argument('-d', '--date', \
        help='Date/name of Import fileset to process (YYYY-MM-DD_NAME)', required=True)
    parser.add_argument('-n', '--nosync', help='Do not trigger a sync after extracting content',
        required=False, action="store_true")
    parser.add_argument('-r', '--remove', help='Remove input files after import has completed',
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    org_name = args.org
    expdate = args.date

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(expdate)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Trigger a sync of the content into the Library
    if args.nosync:
        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = False
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (task_id, delete_override) = sync_content(org_id, imported_repos)

        # Now we need to wait for the sync to complete
        helpers.wait_for_task(task_id, 'sync')

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

    if args.remove and not delete_override:
        msg = "Removing " + helpers.IMPORTDIR + "/sat6_export_" + expdate + "* input files"
        helpers.log_msg(msg, 'DEBUG')
#        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + expdate + "*")

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')
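
get_inputfiles() is not included in this listing. A sketch of the minimum it has to do, assuming the export side drops files named sat6_export_<YYYY-MM-DD_NAME>* into helpers.IMPORTDIR (the real helper may also reassemble split archives and verify checksums):

import glob
import os
import sys

def get_inputfiles(expdate):
    # Sketch only: locate the transferred export files for the requested dataset
    basename = 'sat6_export_' + expdate
    inputfiles = glob.glob(os.path.join(helpers.IMPORTDIR, basename + '*'))
    if not inputfiles:
        msg = "Import files for dataset " + expdate + " not found in " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'ERROR')
        sys.exit(-1)
    return basename
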
Example #13
def main(args):
    """Promote Content Views from the previous lifecycle environment."""

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Publishes content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Publish ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be published',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-c', '--comment', help="Add a custom description", required=False)
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates", required=False,
        action="store_true")
    parser.add_argument('-m', '--forcemeta', help="Force metadata regeneration", required=False,
        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    if not args.last:
        msg = "-------- Content view publish started by " + runuser + " -----------"
        helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun

    # Set up the description that will be added to the published version
    if args.comment:
        description = args.comment
    else:
        description = "Published by " + runuser + "\n via API script"

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    publish_list = []
    if not args.all:
        publish_list = helpers.CONFIG['publish']['content_views']

        if not publish_list:
            msg = "Cannot find publish configuration"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CV's " + str(publish_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, publish_list)

    # Publish the content views. Returns a list of task IDs.
    publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, args.quiet,
        args.forcemeta)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory['Library'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Publish completed successfully\n\n" + output
        subject = "Satellite 6 publish completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
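
The promotion history written above is an ordinary pickled dict keyed by lifecycle environment name ('Library' for publishes), with a YYYY-MM-DD date string as the value, so it can be inspected outside the script with a few lines of Python:

import pickle

# Load and print the promotion/publish history kept under <script dir>/var
phistory = pickle.load(open('var/promotions.pkl', 'rb'))
for lenv in sorted(phistory):
    print(lenv + ': ' + phistory[lenv])
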
Example #14
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Cleans content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-k', '--keep', help='How many old versions to keep (only used with -a)',
        required=False)
    group.add_argument('-a', '--all', help='Clean ALL content views', required=False,
        action="store_true")
    parser.add_argument('-c', '--cleanall', help='Remove orphan versions between in-use views',
        required=False, action="store_true")
    parser.add_argument('-i', '--ignorefirstpromoted', help='Start the keep count from the first CV version, not from the first promoted version',
        required=False, action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be cleaned',
        required=False, action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view cleanup started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun
    cleanall = args.cleanall
    ignorefirstpromoted = args.ignorefirstpromoted
    if args.keep:
        keep = args.keep
    else:
        keep = "0"

    cleanup_list = []
    if not args.all:
        cleanup_list = helpers.CONFIG['cleanup']['content_views']

        if not cleanup_list:
            msg = "Cannot find cleanup configuration"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

        msg = "Config found for CV's " + str(cleanup_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_keep) = get_cv(org_id, cleanup_list, keep)

    # Clean the content views. Returns a list of task IDs.
    cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall, ignorefirstpromoted)

    # Exit cleanly
    sys.exit(0)
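
As with the 'promotion' section sketched earlier, the 'publish' and 'cleanup' lookups only reveal part of the configuration shape. A sketch of what this routine and the publish routine above assume; the content view names are examples, and any per-view keep settings are resolved inside get_cv() and are not visible in these listings:

# Assumed shape of the 'publish' and 'cleanup' config sections (sketch only)
CONFIG = {
    'publish': {
        'content_views': ['RHEL7_Base', 'RHEL7_Extras'],   # example content view names
    },
    'cleanup': {
        'content_views': ['RHEL7_Base', 'RHEL7_Extras'],   # example content view names
    },
}
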
Example #15
def main(args):
    """Perform import of Default Content View."""
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-d', '--dataset', \
        help='Date/name of Import dataset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n',
                        '--nosync',
                        help='Do not trigger a sync after extracting content',
                        required=False,
                        action="store_true")
    parser.add_argument('-r',
                        '--remove',
                        help='Remove input files after import has completed',
                        required=False,
                        action="store_true")
    parser.add_argument('-l',
                        '--last',
                        help='Display the last successful import performed',
                        required=False,
                        action="store_true")
    parser.add_argument('-L',
                        '--list',
                        help='List all successfully completed imports',
                        required=False,
                        action="store_true")
    parser.add_argument('-c',
                        '--count',
                        help='Display all package counts after import',
                        required=False,
                        action="store_true")
    parser.add_argument(
        '-f',
        '--force',
        help='Force import of data if it has previously been done',
        required=False,
        action="store_true")
    parser.add_argument(
        '-u',
        '--unattended',
        help='Answer any prompts safely, allowing automated usage',
        required=False,
        action="store_true")
    parser.add_argument('--fixhistory',
                        help='Force import history to match export history',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dataset = args.dataset

    if args.fixhistory:
        fixhistory = True
    else:
        fixhistory = False

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    imports = []
    # Read the last imports data
    if os.path.exists(vardir + '/imports.pkl'):
        imports = pickle.load(open(vardir + '/imports.pkl', 'rb'))
        # If we have a string we convert to a list. This should only occur the first time we
        # migrate from the original string version of the pickle.
        if type(imports) is str:
            imports = imports.split()
        last_import = imports[-1]
    # Display the last successful import(s)
    if args.last or args.list:
        if os.path.exists(vardir + '/imports.pkl'):
            if args.last:
                msg = "Last successful import was " + last_import
                helpers.log_msg(msg, 'INFO')
                print msg
            if args.list:
                print "Completed imports:\n----------------"
                for item in imports:
                    print item
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print msg
        sys.exit(0)

    # If we got this far without -d being specified, error out cleanly
    if args.dataset is None:
        parser.error("--dataset is required")

    # If we have already imported this dataset let the user know
    if dataset in imports:
        if not args.force:
            msg = "Dataset " + dataset + " has already been imported. Use --force if you really want to do this."
            helpers.log_msg(msg, 'WARNING')
            sys.exit(2)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(dataset)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR +
              "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Read in the export history from the input dataset
    dsname = dataset.split('_')[1]
    exports = pickle.load(
        open(helpers.IMPORTDIR + '/exporthistory_' + dsname + '.pkl', 'rb'))

    # Check for and let the user decide if they want to continue with missing imports
    missing_imports = check_missing(imports, exports, dataset, fixhistory,
                                    vardir)
    if missing_imports:
        msg = "Run sat_import with the --fixhistory flag to reset the import history to this export"
        helpers.log_msg(msg, 'INFO')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with import?", "no")
            if not answer:
                msg = "Import Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Import continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Import Aborted"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FI, output)
            sys.exit(3)

    # Trigger a sync of the content into the Library
    if args.nosync:
        #print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = True
        newrepos = False  # no sync was run, so no new repos could be detected
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))
        package_count = pickle.load(open('package_count.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override, newrepos) = sync_content(org_id, imported_repos)

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

        # Verify the repository package/erratum counts match the sync host
        check_counts(org_id, package_count, args.count)

    if os.path.exists(helpers.IMPORTDIR + '/puppetforge'):
        print 'Offline puppet-forge-server bundle is available to import separately in '\
            + helpers.IMPORTDIR + '/puppetforge\n'

    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print msg
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + dataset +
                  "*")
        os.system("rm -rf " + helpers.IMPORTDIR +
                  "/{content,custom,listing,*.pkl}")
        excode = 0
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 2
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 0

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data (append to existing pickle)
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    imports.append(dataset)
    pickle.dump(imports, open(vardir + '/imports.pkl', "wb"))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        if missing_imports:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                Missing datasets were detected during the import - please check the logs\n\n" + output
            subject = "Satellite 6 import completed: Missing datasets"
        elif newrepos:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                New repos found that need to be imported manually - please check the logs \n\n" + output
            subject = "Satellite 6 import completed: New repos require manual intervention"
        else:
            message = "Import of dataset " + dataset + " completed successfully\n\n" + output
            subject = "Satellite 6 import completed"
        helpers.mailout(subject, message)

    # And exit.
    sys.exit(excode)
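
check_missing() compares the export history shipped inside the dataset against the local import history; its body is not shown here. A rough sketch, assuming both histories are plain lists of dataset names and that --fixhistory (per its help text) rewrites the local history to match the export history:

import pickle

def check_missing(imports, exports, dataset, fixhistory, vardir):
    # Sketch only - the real helper may log more detail or handle ordering
    if fixhistory:
        # Reset the local import history so it matches the export history
        pickle.dump(exports, open(vardir + '/imports.pkl', 'wb'))
        return False
    missing = [exp for exp in exports if exp not in imports and exp != dataset]
    for exp in missing:
        helpers.log_msg("Exported dataset " + exp + " has not been imported here", 'WARNING')
    return bool(missing)
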
Example #16
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Promotes content views for specified organization to the target environment.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-e', '--env', help='Target Environment (e.g. Development, Quality, Production)',
        required=False)
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Promote ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be promoted',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates", required=False,
        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view promotion started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    target_env = args.env
    dry_run = args.dryrun

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)

    # Error if no environment to promote to is given
    if args.env is None:
        parser.error('--env is required')

    promote_list = []
    if not args.all:
        for x in helpers.CONFIG['promotion']:
            if x == 'batch':
                continue
            if helpers.CONFIG['promotion'][x]['name'] == target_env:
                promote_list = helpers.CONFIG['promotion'][x]['content_views']

        if not promote_list:
            msg = "Cannot find promotion configuration for '" + target_env + "'"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CV's " + str(promote_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Now, let's fetch all available lifecycle environments for this org...
    (env_list, prior_list) = get_envs(org_id)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, target_env, env_list, prior_list,
        promote_list)

    # Promote to the given environment. Returns a list of task IDs.
    promote(target_env, ver_list, ver_descr, ver_version, env_list, prior_list, dry_run, args.quiet)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory[target_env] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Promotion completed successfully\n\n" + output
        subject = "Satellite 6 promotion completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
Example #17
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')
    confdir = os.path.join(dir, 'config')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Publishes content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    group.add_argument('-a', '--all', help='Publish ALL content views', required=False,
        action="store_true")
    parser.add_argument('-d', '--dryrun', help='Dry Run - Only show what will be published',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display last promotions', required=False,
        action="store_true")
    parser.add_argument('-c', '--comment', help="Add a custom description", required=False)
    parser.add_argument('-q', '--quiet', help="Suppress progress output updates", required=False,
        action="store_true")
    parser.add_argument('-m', '--forcemeta', help="Force metadata regeneration", required=False,
        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    if not args.last:
        msg = "-------- Content view publish started by " + runuser + " -----------"
        helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun

    # Set up the description that will be added to the published version
    if args.comment:
        description = args.comment
    else:
        description = "Published by " + runuser + "\n via API script"

    # Load the promotion history
    if not os.path.exists(vardir + '/promotions.pkl'):
        if not os.path.exists(vardir):
            os.makedirs(vardir)
        phistory = {}
    else:
        phistory = pickle.load(open(vardir + '/promotions.pkl', 'rb'))

    # Read the promotion history if --last requested
    if args.last:
        if phistory:
            print 'Last promotions:'
            for lenv, time in phistory.iteritems():
                print lenv, time
        else:
            print 'No promotions recorded'
        sys.exit(0)


    publish_list = []
    if not args.all:
        publish_list = helpers.CONFIG['publish']['content_views']

        if not publish_list:
            msg = "Cannot find publish configuration"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FP, output)
            sys.exit(1)

        msg = "Config found for CV's " + str(publish_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_version) = get_cv(org_id, publish_list)

    # Publish the content views. Returns a list of task IDs.
    publish(ver_list, ver_descr, ver_version, dry_run, runuser, description, args.quiet,
        args.forcemeta)

    # Add/Update the promotion history dictionary so we can check when we last promoted
    phistory['Library'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
    pickle.dump(phistory, open(vardir + '/promotions.pkl', 'wb'))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        message = "Publish completed successfully\n\n" + output
        subject = "Satellite 6 publish completed"
        helpers.mailout(subject, message)

    # Exit cleanly
    sys.exit(0)
Example #18
def main(args):
    """
    Main routine
    """

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Cleans content views for specified organization.')
    group = parser.add_mutually_exclusive_group()
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument(
        '-k',
        '--keep',
        help='How many old versions to keep (only used with -a)',
        required=False)
    group.add_argument('-a',
                       '--all',
                       help='Clean ALL content views',
                       required=False,
                       action="store_true")
    parser.add_argument('-c',
                        '--cleanall',
                        help='Remove orphan versions between in-use views',
                        required=False,
                        action="store_true")
    parser.add_argument(
        '-i',
        '--ignorefirstpromoted',
        help='Start the keep count from the first CV version, not from the first promoted version',
        required=False,
        action="store_true")
    parser.add_argument('-d',
                        '--dryrun',
                        help='Dry Run - Only show what will be cleaned',
                        required=False,
                        action="store_true")

    args = parser.parse_args()

    # Log the fact we are starting
    msg = "-------- Content view cleanup started by " + runuser + " -----------"
    helpers.log_msg(msg, 'INFO')

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dry_run = args.dryrun
    cleanall = args.cleanall
    ignorefirstpromoted = args.ignorefirstpromoted
    if args.keep:
        keep = args.keep
    else:
        keep = "0"

    cleanup_list = []
    if not args.all:
        cleanup_list = helpers.CONFIG['cleanup']['content_views']

        if not cleanup_list:
            msg = "Cannot find cleanup configuration"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(1)

        msg = "Config found for CV's " + str(cleanup_list)
        helpers.log_msg(msg, 'DEBUG')

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    # Get the list of Content Views along with the latest view version in each environment
    (ver_list, ver_descr, ver_keep) = get_cv(org_id, cleanup_list, keep)

    # Clean the content views. Returns a list of task IDs.
    cleanup(ver_list, ver_descr, dry_run, runuser, ver_keep, cleanall,
            ignorefirstpromoted)

    # Exit cleanly
    sys.exit(0)
Example #19
def main(args):
    """
    Main Routine
    """
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o', '--org', help='Organization (Uses default if not specified)',
        required=False)
    parser.add_argument('-d', '--dataset', \
        help='Date/name of Import dataset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n', '--nosync', help='Do not trigger a sync after extracting content',
        required=False, action="store_true")
    parser.add_argument('-r', '--remove', help='Remove input files after import has completed',
        required=False, action="store_true")
    parser.add_argument('-l', '--last', help='Display the last successful import performed',
        required=False, action="store_true")
    parser.add_argument('-L', '--list', help='List all successfully completed imports',
        required=False, action="store_true")
    parser.add_argument('-c', '--count', help='Display all package counts after import',
        required=False, action="store_true")
    parser.add_argument('-f', '--force', help='Force import of data if it has previously been done',
        required=False, action="store_true")
    parser.add_argument('-u', '--unattended', help='Answer any prompts safely, allowing automated usage',
        required=False, action="store_true")
    parser.add_argument('--fixhistory', help='Force import history to match export history',
        required=False, action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dataset = args.dataset

    if args.fixhistory:
        fixhistory = True
    else:
        fixhistory = False

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    imports = []
    # Read the last imports data
    if os.path.exists(vardir + '/imports.pkl'):
        imports = pickle.load(open(vardir + '/imports.pkl', 'rb'))
        # If we have a string we convert to a list. This should only occur the first time we
        # migrate from the original string version of the pickle.
        if type(imports) is str:
            imports = imports.split()
        last_import = imports[-1]
    # Display the last successful import(s)
    if args.last or args.list:
        if os.path.exists(vardir + '/imports.pkl'):
            if args.last:
                msg = "Last successful import was " + last_import
                helpers.log_msg(msg, 'INFO')
                print msg
            if args.list:
                print "Completed imports:\n----------------"
                for item in imports:
                    print item
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print msg
        sys.exit(0)

    # If we got this far without -d being specified, error out cleanly
    if args.dataset is None:
        parser.error("--dataset is required")

    # If we have already imported this dataset let the user know
    if dataset in imports:
        if not args.force:
            msg = "Dataset " + dataset + " has already been imported. Use --force if you really want to do this."
            helpers.log_msg(msg, 'WARNING')
            sys.exit(2)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(dataset)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Read in the export history from the input dataset
    dsname = dataset.split('_')[1]
    exports = pickle.load(open(helpers.IMPORTDIR + '/exporthistory_' + dsname + '.pkl', 'rb'))

    # Check for and let the user decide if they want to continue with missing imports
    missing_imports = check_missing(imports, exports, dataset, fixhistory, vardir)
    if missing_imports:
        msg = "Run sat_import with the --fixhistory flag to reset the import history to this export"
        helpers.log_msg(msg, 'INFO')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with import?", "no")
            if not answer:
                msg = "Import Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Import continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Import Aborted"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FI, output)
            sys.exit(3)


    # Trigger a sync of the content into the Library
    if args.nosync:
        #print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print 'Please synchronise all repositories to make new content available for publishing.'
        delete_override = True
        newrepos = False  # no sync was run, so no new repos could be detected
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))
        package_count = pickle.load(open('package_count.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override, newrepos) = sync_content(org_id, imported_repos)

        print helpers.GREEN + "Import complete.\n" + helpers.ENDC
        print 'Please publish content views to make new content available.'

        # Verify the repository package/erratum counts match the sync host
        check_counts(org_id, package_count, args.count)

    if os.path.exists(helpers.IMPORTDIR + '/puppetforge'):
        print 'Offline puppet-forge-server bundle is available to import separately in '\
            + helpers.IMPORTDIR + '/puppetforge\n'


    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print msg
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + dataset + "*")
        os.system("rm -rf " + helpers.IMPORTDIR + "/{content,custom,listing,*.pkl}")
        excode = 0
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 2
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print msg
        excode = 0

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data (append to existing pickle)
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    imports.append(dataset)
    pickle.dump(imports, open(vardir + '/imports.pkl', "wb"))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        if missing_imports:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                Missing datasets were detected during the import - please check the logs\n\n" + output
            subject = "Satellite 6 import completed: Missing datasets"

        elif newrepos:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                New repos found that need to be imported manually - please check the logs \n\n" + output
            subject = "Satellite 6 import completed: New repos require manual intervention"

        else:
            message = "Import of dataset " + dataset + " completed successfully\n\n" + output
            subject = "Satellite 6 import completed"

        helpers.mailout(subject, message)

    # And exit.
    sys.exit(excode)