def github_repo_select_and_delete(github_user: github.NamedUser.NamedUser, project_path: Path = None):
    myrepo = select_github_repo(github_user)
    repo_name = myrepo.name
    if helpers.query_text('Please type the name of the Repository to delete it. THIS CANNOT BE UNDONE!: ') == repo_name:
        if helpers.query_yes_no(f'Really delete: {repo_name}'):
            myrepo.delete()
            if project_path:
                target_path = project_path / repo_name
                if os.path.exists(target_path):
                    if helpers.query_yes_no(f'Repository: "{repo_name}" was found in: "{target_path}". Delete?'):
                        shutil.rmtree(target_path)
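Every example on this page funnels user confirmation through helpers.query_yes_no (and free-form input through helpers.query_text). The helpers module itself is not shown here; the following is a minimal stand-in sketch that matches how the examples call it — an optional default answer as the second argument and a bool return value — not the project's actual implementation:

def query_text(question: str) -> str:
    """Prompt for free-form text input and return it stripped."""
    return input(question).strip()


def query_yes_no(question: str, default: str = 'yes') -> bool:
    """Ask a yes/no question via input() and return the answer as a bool."""
    valid = {'yes': True, 'y': True, 'no': False, 'n': False}
    hint = ' [Y/n] ' if default == 'yes' else ' [y/N] '
    while True:
        choice = input(question + hint).strip().lower()
        if not choice and default is not None:
            return valid[default]  # empty input falls back to the default
        if choice in valid:
            return valid[choice]
        print("Please answer 'yes' or 'no'.")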
Example #2
def check_incomplete_sync():
    """
    Check for any sync tasks that are in an Incomplete state.
    These are not paused or locked, but are the orange 100% complete ones in the UI
    """
    repo_list = helpers.get_json(
        helpers.KATELLO_API + "/content_view_versions")

    # Extract the list of repo ids, then check the state of each one.
    incomplete_sync = False
    for repo in repo_list['results']:
        for repo_id in repo['repositories']:
            repo_status = helpers.get_json(
                helpers.KATELLO_API + "/repositories/" + str(repo_id['id']))

            if repo_status['content_type'] == 'puppet':
                if repo_status['last_sync']['state'] == 'stopped':
                    if repo_status['last_sync']['result'] == 'warning':
                        incomplete_sync = True
                        msg = "Repo ID " + str(repo_id['id']) + " Sync Incomplete"
                        helpers.log_msg(msg, 'DEBUG')

    # If we have detected incomplete sync tasks, ask the user if they want to export anyway.
    # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected sat.
    if incomplete_sync:
        msg = "Incomplete sync jobs detected"
        helpers.log_msg(msg, 'WARNING')
        answer = helpers.query_yes_no("Continue with export?", "no")
        if not answer:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(-1)
        else:
            msg = "Export continued by user"
            helpers.log_msg(msg, 'INFO')
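The Katello examples above and below fetch API data through helpers.get_json against helpers.KATELLO_API. A plausible sketch of that helper, assuming the requests library and that authentication/SSL handling happen elsewhere (both assumptions; the real helpers module may differ):

import requests

def get_json(url):
    """GET a Satellite/Katello API URL and return the decoded JSON body."""
    response = requests.get(url)
    response.raise_for_status()  # surface HTTP errors instead of parsing bad JSON
    return response.json()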
def github_create_repo_userinp(github_class: github.Github, project_path: Path):
    name = helpers.query_text('Enter Repo Name:')
    if name:
        gitignore = github_select_gitignore_template(github_class)
        # The prompt asks about public visibility, so invert it for the private flag
        private = not helpers.query_yes_no('Should the repo be public?')
        myrepo = github_create_repo(github_class.get_user(), name, private, True, gitignore)
        if not myrepo:
            print(f"Couldn't create repo: {name}")
        else:
            target_path = project_path / myrepo.name
            github_clone(myrepo, target_path)
    def save_file(self, nointeract: bool = False):
        """Save config file if changed

        Keyword Arguments:
            nointeract {bool} -- [skip user interaction] (default: {False})
        """
        if self.settchanged:
            if nointeract:
                savesett = True
            else:
                savesett = helpers.query_yes_no(
                    'Do you want to create settings for the previously entered values?'
                )
            if savesett:
                with open(self.config_path, 'w') as configfile:
                    self.config.write(configfile)
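Usage sketch, assuming save_file belongs to the GitHubConfValues wrapper used in main() further down (an assumption based on context):

conf = GitHubConfValues('Config.ini')
# ... change settings so self.settchanged becomes True ...
conf.save_file(nointeract=True)  # persist without prompting the user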
Example #5
def check_incomplete_sync():
    """
    Check for any sync tasks that are in an Incomplete state.
    These are not paused or locked, but are the orange 100% complete ones in the UI
    """
    repo_list = helpers.get_json(
        helpers.KATELLO_API + "/content_view_versions")

    # Extract the list of repo ids, then check the state of each one.
    incomplete_sync = False
    for repo in repo_list['results']:
        for repo_id in repo['repositories']:
            repo_status = helpers.get_json(
                helpers.KATELLO_API + "/repositories/" + str(repo_id['id']))

            if repo_status['content_type'] == 'yum':
                if repo_status['last_sync'] is None:
                    if repo_status['url'] is None:
                        msg = "Repo ID " + str(repo_id['id']) + " No Sync Configured"
                        #helpers.log_msg(msg, 'DEBUG')
                elif repo_status['last_sync']['state'] == 'stopped':
                    if repo_status['last_sync']['result'] == 'warning':
                        incomplete_sync = True
                        msg = "Repo ID " + str(repo_id['id']) + " Sync Incomplete"
                        helpers.log_msg(msg, 'DEBUG')

    # If we have detected incomplete sync tasks, ask the user if they want to export anyway.
    # This isn't fatal, but *MAY* lead to inconsistent repositories on the disconnected sat.
    if incomplete_sync:
        msg = "Incomplete sync jobs detected"
        helpers.log_msg(msg, 'WARNING')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with export?", "no")
            if not answer:
                msg = "Export Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Export continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(3)
def github_clone(repo: github.Repository.Repository, target_folder: Path):
    """Clones a GitHub repository to disk
    Arguments:
        repo {github.Repository.Repository} -- [Repo class]
        target_folder {Path} -- [where to clone to]
    """
    if os.path.exists(target_folder):
        if helpers.query_yes_no(f'The target directory: "{target_folder}" already exists. Overwrite?'):
            print(f'Removing directory: "{target_folder}"')
            shutil.rmtree(target_folder, onerror=shutil_rmtree_onerror)
        else:
            print('Aborting clone, folder already exists!')
    if not os.path.exists(target_folder):
        repoorigin = str(repo.clone_url)
        print(f"Cloning from: {repoorigin}")
        pr = subprocess.call(['git', 'clone', repoorigin, str(target_folder)])
        if pr == 0:
            print(f'Repository cloned successfully to: "{target_folder}"')
        else:
            print(f'Clone returned unexpected result: {pr}')
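A typical call, assuming an authenticated github.Github client; the token and repository name below are illustrative only:

gh = github.Github('<personal-access-token>')   # hypothetical token
myrepo = gh.get_user().get_repo('myproject')    # hypothetical repo name
github_clone(myrepo, Path.home() / 'projects' / myrepo.name)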
def main():
    """Main GitHub worker loop
    """
    conf = GitHubConfValues('Config.ini')

    q = [
        {
        'type': 'list',
        'name': 'action',
        'message': 'What to do?',
        'choices': [
            'Clone Repo from GitHub',
            'Create GitHub Repo',
            'Remove Local Repo',
            'Remove Repo from GitHub',
            'Exit!'],
        }
    ]

    while True:
        answer = prompt(q)['action']
        if answer == 'Clone Repo from GitHub':
            github_repo_select_and_clone(conf.github_loggedin().get_user(), conf.project_path())
        elif answer == 'Exit!':
            return
        elif answer == 'Remove Local Repo':
            fol = project_select_folder(conf.project_path())
            if fol:
                if helpers.query_yes_no(f'Do you really want to delete: {fol}'):
                    print(f'Deleting folder locally: "{fol}"')
                    shutil.rmtree(fol, onerror=shutil_rmtree_onerror)
        elif answer == 'Create GitHub Repo':
            github_create_repo_userinp(conf.github_loggedin(), conf.project_path())
        elif answer == 'Remove Repo from GitHub':
            github_repo_select_and_delete(conf.github_loggedin().get_user(), conf.project_path())
        else:
            print('Unsupported Function Selected')
Example #8
def check_disk_space(export_type, unattended):
    """
    Check the disk usage of the pulp partition.
    For a full export we need at least 50% free, as we spool to /var/lib/pulp.
    """
    pulp_used = float(helpers.disk_usage('/var/lib/pulp'))
    if export_type == 'full' and int(pulp_used) > 50:
        msg = "Insufficient space in /var/lib/pulp for a full export. >50% free space is required."
        helpers.log_msg(msg, 'WARNING')
        if not unattended:
            answer = helpers.query_yes_no("Continue with export?", "no")
            if not answer:
                msg = "Export Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Export continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Export Aborted"
            helpers.log_msg(msg, 'ERROR')
            sys.exit(3)
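check_disk_space expects helpers.disk_usage to return the percentage of the partition already in use. A minimal stand-in using the standard library (an assumption; the real helper may shell out to df instead):

import shutil

def disk_usage(path):
    """Return the percentage of the filesystem at `path` that is in use."""
    usage = shutil.disk_usage(path)
    return 100.0 * usage.used / usage.total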
Example #9
def main(args):
    """Perform import of Default Content View."""
    #pylint: disable-msg=R0912,R0914,R0915

    if not helpers.DISCONNECTED:
        msg = "Import cannot be run on the connected Satellite (Sync) host"
        helpers.log_msg(msg, 'ERROR')
        if helpers.MAILOUT:
            helpers.tf.seek(0)
            output = "{}".format(helpers.tf.read())
            helpers.mailout(helpers.MAILSUBJ_FI, output)
        sys.exit(1)

    # Who is running this script?
    runuser = helpers.who_is_running()

    # Set the base dir of the script and where the var data is
    global dir
    global vardir
    dir = os.path.dirname(__file__)
    vardir = os.path.join(dir, 'var')

    # Log the fact we are starting
    msg = "------------- Content import started by " + runuser + " ----------------"
    helpers.log_msg(msg, 'INFO')

    # Check for sane input
    parser = argparse.ArgumentParser(
        description='Performs Import of Default Content View.')
    # pylint: disable=bad-continuation
    parser.add_argument('-o',
                        '--org',
                        help='Organization (Uses default if not specified)',
                        required=False)
    parser.add_argument('-d', '--dataset', \
        help='Date/name of Import dataset to process (YYYY-MM-DD_NAME)', required=False)
    parser.add_argument('-n',
                        '--nosync',
                        help='Do not trigger a sync after extracting content',
                        required=False,
                        action="store_true")
    parser.add_argument('-r',
                        '--remove',
                        help='Remove input files after import has completed',
                        required=False,
                        action="store_true")
    parser.add_argument('-l',
                        '--last',
                        help='Display the last successful import performed',
                        required=False,
                        action="store_true")
    parser.add_argument('-L',
                        '--list',
                        help='List all successfully completed imports',
                        required=False,
                        action="store_true")
    parser.add_argument('-c',
                        '--count',
                        help='Display all package counts after import',
                        required=False,
                        action="store_true")
    parser.add_argument(
        '-f',
        '--force',
        help='Force import of data if it has previously been done',
        required=False,
        action="store_true")
    parser.add_argument(
        '-u',
        '--unattended',
        help='Answer any prompts safely, allowing automated usage',
        required=False,
        action="store_true")
    parser.add_argument('--fixhistory',
                        help='Force import history to match export history',
                        required=False,
                        action="store_true")
    args = parser.parse_args()

    # Set our script variables from the input args
    if args.org:
        org_name = args.org
    else:
        org_name = helpers.ORG_NAME
    dataset = args.dataset

    if args.fixhistory:
        fixhistory = True
    else:
        fixhistory = False

    # Record where we are running from
    script_dir = str(os.getcwd())

    # Get the org_id (Validates our connection to the API)
    org_id = helpers.get_org_id(org_name)

    imports = []
    # Read the last imports data
    if os.path.exists(vardir + '/imports.pkl'):
        imports = pickle.load(open(vardir + '/imports.pkl', 'rb'))
        # If we have a string we convert to a list. This should only occur the first time we
        # migrate from the original string version of the pickle.
        if isinstance(imports, str):
            imports = imports.split()
        last_import = imports[-1]
    # Display the last successful import(s)
    if args.last or args.list:
        if os.path.exists(vardir + '/imports.pkl'):
            if args.last:
                msg = "Last successful import was " + last_import
                helpers.log_msg(msg, 'INFO')
                print(msg)
            if args.list:
                print("Completed imports:\n----------------")
                for item in imports:
                    print(item)
        else:
            msg = "Import has never been performed"
            helpers.log_msg(msg, 'INFO')
            print(msg)
        sys.exit(0)

    # If we got this far without -d being specified, error out cleanly
    if args.dataset is None:
        parser.error("--dataset is required")

    # If we have already imported this dataset let the user know
    if dataset in imports:
        if not args.force:
            msg = "Dataset " + dataset + " has already been imported. Use --force if you really want to do this."
            helpers.log_msg(msg, 'WARNING')
            sys.exit(2)

    # Figure out if we have the specified input fileset
    basename = get_inputfiles(dataset)

    # Cleanup from any previous imports
    os.system("rm -rf " + helpers.IMPORTDIR +
              "/{content,custom,listing,*.pkl}")

    # Extract the input files
    extract_content(basename)

    # Read in the export history from the input dataset
    dsname = dataset.split('_')[1]
    exports = pickle.load(
        open(helpers.IMPORTDIR + '/exporthistory_' + dsname + '.pkl', 'rb'))

    # Check for and let the user decide if they want to continue with missing imports
    missing_imports = check_missing(imports, exports, dataset, fixhistory,
                                    vardir)
    if missing_imports:
        msg = "Run sat_import with the --fixhistory flag to reset the import history to this export"
        helpers.log_msg(msg, 'INFO')
        if not args.unattended:
            answer = helpers.query_yes_no("Continue with import?", "no")
            if not answer:
                msg = "Import Aborted"
                helpers.log_msg(msg, 'ERROR')
                sys.exit(3)
            else:
                msg = "Import continued by user"
                helpers.log_msg(msg, 'INFO')
        else:
            msg = "Import Aborted"
            helpers.log_msg(msg, 'ERROR')
            if helpers.MAILOUT:
                helpers.tf.seek(0)
                output = "{}".format(helpers.tf.read())
                helpers.mailout(helpers.MAILSUBJ_FI, output)
            sys.exit(3)

    # Trigger a sync of the content into the Library
    if args.nosync:
        #print(helpers.GREEN + "Import complete.\n" + helpers.ENDC)
        msg = "Repository sync was requested to be skipped"
        helpers.log_msg(msg, 'WARNING')
        print('Please synchronise all repositories to make new content available for publishing.')
        delete_override = True
    else:
        # We need to figure out which repos to sync. This comes to us via a pickle containing
        # a list of repositories that were exported
        imported_repos = pickle.load(open('exported_repos.pkl', 'rb'))
        package_count = pickle.load(open('package_count.pkl', 'rb'))

        # Run a repo sync on each imported repo
        (delete_override, newrepos) = sync_content(org_id, imported_repos)

        print(helpers.GREEN + "Import complete.\n" + helpers.ENDC)
        print('Please publish content views to make new content available.')

        # Verify the repository package/erratum counts match the sync host
        check_counts(org_id, package_count, args.count)

    if os.path.exists(helpers.IMPORTDIR + '/puppetforge'):
        print('Offline puppet-forge-server bundle is available to import separately in '
              + helpers.IMPORTDIR + '/puppetforge\n')

    if args.remove and not delete_override:
        msg = "Removing input files from " + helpers.IMPORTDIR
        helpers.log_msg(msg, 'INFO')
        print(msg)
        os.system("rm -f " + helpers.IMPORTDIR + "/sat6_export_" + dataset +
                  "*")
        os.system("rm -rf " + helpers.IMPORTDIR +
                  "/{content,custom,listing,*.pkl}")
        excode = 0
    elif delete_override:
        msg = "* Not removing input files due to incomplete sync *"
        helpers.log_msg(msg, 'INFO')
        print(msg)
        excode = 2
    else:
        msg = " (Removal of input files was not requested)"
        helpers.log_msg(msg, 'INFO')
        print(msg)
        excode = 0

    msg = "Import Complete"
    helpers.log_msg(msg, 'INFO')

    # Save the last completed import data (append to existing pickle)
    os.chdir(script_dir)
    if not os.path.exists(vardir):
        os.makedirs(vardir)
    imports.append(dataset)
    pickle.dump(imports, open(vardir + '/imports.pkl', "wb"))

    # Run the mailout
    if helpers.MAILOUT:
        helpers.tf.seek(0)
        output = "{}".format(helpers.tf.read())
        if missing_imports:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                Missing datasets were detected during the import - please check the logs\n\n" + output
            subject = "Satellite 6 import completed: Missing datasets"
        elif newrepos:
            message = "Import of dataset " + dataset + " completed successfully.\n\n \
                New repos found that need to be imported manually - please check the logs \n\n" + output
            subject = "Satellite 6 import completed: New repos require manual intervention"
        else:
            message = "Import of dataset " + dataset + " completed successfully\n\n" + output
            subject = "Satellite 6 import completed"
        helpers.mailout(subject, message)

    # And exit.
    sys.exit(excode)
Example #10
def main():
    View.print_('Opening workbook...')

    # opening the excel file
    vocabulary_wb = Excel('vocabulary.xlsx')

    vocabulary_dict = vocabulary_wb.get_wb_as_dict()

    # TODO change to singleton
    vocabulary_service = VocabularyService(vocabulary_dict,
                                           vocabulary_wb.get_sheet_names())
    statistic_service = StatisticService(vocabulary_dict)

    try:
        # TODO parse the desired level as a Level data
        desired_level = sys.argv[1]

    except IndexError:
        desired_level = Level.UNKNOWN

    phrase_gen = vocabulary_service.get_next_phrase_generator()

    View.clear_screen()

    while True:

        try:
            statistic_service.show()

            phrase = next(phrase_gen)

            View.print_('Word is: {}.'.format(green(phrase.get_value())))

            known = query_yes_no("Know it?")

            if not known:

                # display context
                View.print_('Context is: {}'.format(cyan(
                    phrase.get_context())))

                Voice.say(phrase.get_context())

                if query_yes_no("And now?"):
                    # sign known level as mid
                    phrase.set_level(Level.MID)

                else:
                    # sign known level as low
                    phrase.set_level(Level.LOW)

            else:
                phrase.set_level(Level.HIGH)
                # print("phrase {} level has been set to {}". format(phrase.get_value, phrase._level))

            # display translations
            query_yes_no(
                yellow(get_display(','.join(
                    phrase.get_translation()))).encode('utf-8'))

        except KeyboardInterrupt:

            # clear screen
            View.clear_screen()

            # save to disk
            vocabulary_wb.save_changes(vocabulary_service.get_vocabulary())

            # prompt continuing
            if query_yes_no("Are you sure you want to quit?"):
                sys.exit(0)

        except StopIteration:
            # clear screen
            View.clear_screen()
            vocabulary_wb.save_changes(vocabulary_service.get_vocabulary())
            View.print_(
                "Well done! You passed all the terms for level: {}. See you next time!"
                .format(desired_level))
            sys.exit(0)

        except Exception as e:
            traceback.print_exc(file=sys.stdout)
            print(red("Error reading word... {}".format(e)))
            if not query_yes_no("Move to next word?"):
                # save to disk
                vocabulary_wb.save_changes(vocabulary_service.get_vocabulary())
                break
        finally:
            # clear screen
            View.clear_screen()
Example #11
DBNAME = conf["MySQL"]["dbname"]
DBCHARSET = conf["MySQL"]["dbcharset"]


def try_drop(cursor, table_name):
    SQL = 'DROP TABLE IF EXISTS ' + table_name
    print(SQL)
    cursor.execute(SQL)


print(
    "Configuring Tables for database configuration: \n \tServer: {0} \n \tDB-User: {1} \n \tDB-Name: {2}"
    .format(DBHOST, DBUSER, DBNAME))
print("\n** ALL EXISTING TABLES AND DATA WILL BE LOST **\n")

response = helpers.query_yes_no("Continue?")

if response:

    ## Tab Config and Make Connection to Database ##

    charTypeShort = "VARCHAR(16) COLLATE utf8_general_ci"
    charTypeMedium = "VARCHAR(64) COLLATE utf8_general_ci"
    charTypeLong = "VARCHAR(768) COLLATE utf8_general_ci"

    print("Connecting to database...", end=" ")
    connection = helpers.db_connection(DBHOST, DBUSER, DBNAME, DBCHARSET)
    cursor = connection.cursor()
    print("connected.")

    #### Table Create Sections ####
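Note that try_drop interpolates table_name straight into the statement. SQL identifiers cannot be passed as bound parameters, so if the name could ever come from outside this script it should be validated first; a hedged variant (the name try_drop_checked is illustrative):

import re

def try_drop_checked(cursor, table_name):
    # Identifiers can't be bound as parameters, so validate before
    # interpolating to guard against SQL injection.
    if not re.fullmatch(r'[A-Za-z0-9_]+', table_name):
        raise ValueError('unsafe table name: {!r}'.format(table_name))
    sql = 'DROP TABLE IF EXISTS `{}`'.format(table_name)
    print(sql)
    cursor.execute(sql)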