Example #1
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            name=dict(required=True, type='str'),
            gid=dict(default=None, type='str'),
            system=dict(default=False, type='bool'),
        ),
        supports_check_mode=True
    )

    group = Group(module)

    module.debug('Group instantiated - platform %s' % group.platform)
    if group.distribution:
        module.debug('Group instantiated - distribution %s' % group.distribution)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = group.name
    result['state'] = group.state

    if group.state == 'absent':

        if group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_del()
            if rc != 0:
                module.fail_json(name=group.name, msg=err)

    elif group.state == 'present':

        if not group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
        else:
            (rc, out, err) = group.group_mod(gid=group.gid)

        if rc is not None and rc != 0:
            module.fail_json(name=group.name, msg=err)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    if group.group_exists():
        info = group.group_info()
        result['system'] = group.system
        result['gid'] = info[2]

    module.exit_json(**result)
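
The Group helper class is defined elsewhere in the module. As a rough illustration, its existence and info lookups can be backed by the standard grp database; the sketch below is a simplified, hypothetical stand-in, not the module's actual class, and it omits group_add/group_del/group_mod, which typically shell out to groupadd/groupdel/groupmod via module.run_command.

import grp


class Group(object):
    # Simplified, hypothetical stand-in for the helper used above.
    platform = 'Generic'
    distribution = None

    def __init__(self, module):
        self.module = module
        self.name = module.params['name']
        self.state = module.params['state']
        self.gid = module.params['gid']
        self.system = module.params['system']

    def group_exists(self):
        try:
            grp.getgrnam(self.name)
            return True
        except KeyError:
            return False

    def group_info(self):
        # (gr_name, gr_passwd, gr_gid, gr_mem); index 2 is the GID,
        # matching result['gid'] = info[2] in the example above.
        if not self.group_exists():
            return False
        return list(grp.getgrnam(self.name))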
Example #2
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            port=dict(default=623, type='int'),
            state=dict(required=True, choices=['on', 'off', 'shutdown', 'reset', 'boot']),
            user=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            timeout=dict(default=300, type='int'),
        ),
        supports_check_mode=True,
    )

    # 'command' is expected to be the pyghmi.ipmi.command module imported at
    # file scope (set to None if that import failed).
    if command is None:
        module.fail_json(msg='the python pyghmi module is required')

    name = module.params['name']
    port = module.params['port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    timeout = module.params['timeout']

    # --- run command ---
    try:
        ipmi_cmd = command.Command(
            bmc=name, userid=user, password=password, port=port
        )
        module.debug('ipmi instantiated - name: "%s"' % name)

        current = ipmi_cmd.get_power()
        if current['powerstate'] != state:
            response = {'powerstate': state} if module.check_mode else ipmi_cmd.set_power(state, wait=timeout)
            changed = True
        else:
            response = current
            changed = False

        if 'error' in response:
            module.fail_json(msg=response['error'])

        module.exit_json(changed=changed, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
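
Outside of Ansible, the same pyghmi calls that the example wraps (command.Command, get_power, set_power) can be exercised directly. A minimal sketch, assuming pyghmi is installed and the BMC credentials are valid:

from pyghmi.ipmi import command


def ensure_power_state(bmc, user, password, state, port=623, timeout=300):
    # Returns (changed, response), mirroring the module's changed/response pair.
    ipmi_cmd = command.Command(bmc=bmc, userid=user, password=password, port=port)
    current = ipmi_cmd.get_power()          # e.g. {'powerstate': 'on'}
    if current['powerstate'] == state:
        return False, current
    response = ipmi_cmd.set_power(state, wait=timeout)
    if 'error' in response:
        raise RuntimeError(response['error'])
    return True, response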
Example #3
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=True, type='list'),
            domain=dict(required=True),
            realm=dict(required=True),
            hostname=dict(required=True),
            basedn=dict(required=True),
            principal=dict(required=False),
            subject_base=dict(required=True),
            ca_enabled=dict(required=True, type='bool'),
            mkhomedir=dict(required=False, type='bool'),
            on_master=dict(required=False, type='bool'),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    servers = module.params.get('servers')
    realm = module.params.get('realm')
    hostname = module.params.get('hostname')
    basedn = module.params.get('basedn')
    domain = module.params.get('domain')
    principal = module.params.get('principal')
    subject_base = module.params.get('subject_base')
    ca_enabled = module.params.get('ca_enabled')
    mkhomedir = module.params.get('mkhomedir')
    on_master = module.params.get('on_master')

    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
    standard_logging_setup(
        paths.IPACLIENT_INSTALL_LOG, verbose=True, debug=False,
        filemode='a', console_format='%(message)s')

    os.environ['KRB5CCNAME'] = paths.IPA_DNS_CCACHE
    
    options.dns_updates = False
    options.all_ip_addresses = False
    options.ip_addresses = None
    options.request_cert = False
    options.hostname = hostname
    options.preserve_sssd = False
    options.on_master = False
    options.conf_ssh = True
    options.conf_sshd = True
    options.conf_sudo = True
    options.primary = False
    options.permit = False
    options.krb5_offline_passwords = False
    options.create_sshfp = True

    ##########################################################################

    # Create IPA NSS database
    try:
        create_ipa_nssdb()
    except ipautil.CalledProcessError as e:
        module.fail_json(msg="Failed to create IPA NSS database: %s" % e)

    # Get CA certificates from the certificate store
    try:
        ca_certs = get_certs_from_ldap(servers[0], basedn, realm,
                                       ca_enabled)
    except errors.NoCertificateError:
        if ca_enabled:
            ca_subject = DN(('CN', 'Certificate Authority'), subject_base)
        else:
            ca_subject = None
        ca_certs = certstore.make_compat_ca_certs(ca_certs, realm,
                                                  ca_subject)
    ca_certs_trust = [(c, n, certstore.key_policy_to_trust_flags(t, True, u))
                      for (c, n, t, u) in ca_certs]

    if hasattr(paths, "KDC_CA_BUNDLE_PEM"):
        x509.write_certificate_list(
            [c for c, n, t, u in ca_certs if t is not False],
            paths.KDC_CA_BUNDLE_PEM)
    if hasattr(paths, "CA_BUNDLE_PEM"):
        x509.write_certificate_list(
            [c for c, n, t, u in ca_certs if t is not False],
            paths.CA_BUNDLE_PEM)

    # Add the CA certificates to the IPA NSS database
    module.debug("Adding CA certificates to the IPA NSS database.")
    ipa_db = certdb.NSSDatabase(paths.IPA_NSSDB_DIR)
    for cert, nickname, trust_flags in ca_certs_trust:
        try:
            ipa_db.add_cert(cert, nickname, trust_flags)
        except CalledProcessError as e:
            module.fail_json(msg="Failed to add %s to the IPA NSS database." % nickname)

    # Add the CA certificates to the platform-dependent systemwide CA store
    tasks.insert_ca_certs_into_systemwide_ca_store(ca_certs)

    if not on_master:
        client_dns(servers[0], hostname, options)
        configure_certmonger(fstore, subject_base, realm, hostname,
                             options, ca_enabled)

    if hasattr(paths, "SSH_CONFIG_DIR"):
        ssh_config_dir = paths.SSH_CONFIG_DIR
    else:
        ssh_config_dir = services.knownservices.sshd.get_config_dir()
    update_ssh_keys(hostname, ssh_config_dir, options.create_sshfp)

    try:
        os.remove(paths.IPA_DNS_CCACHE)
    except Exception:
        pass

    ##########################################################################

    # Name Server Caching Daemon. Disable for SSSD, use otherwise
    # (if installed)
    nscd = services.knownservices.nscd
    if nscd.is_installed():
        if NUM_VERSION < 40500:
            save_state(nscd)
        else:
            save_state(nscd, statestore)

        try:
            nscd_service_action = 'stop'
            nscd.stop()
        except Exception:
            module.warn("Failed to %s the %s daemon" %
                        (nscd_service_action, nscd.service_name))

        try:
            nscd.disable()
        except Exception:
            module.warn("Failed to disable %s daemon. Disable it manually." %
                        nscd.service_name)

    nslcd = services.knownservices.nslcd
    if nslcd.is_installed():
        if NUM_VERSION < 40500:
            save_state(nslcd)
        else:
            save_state(nslcd, statestore)

    ##########################################################################

    # Modify nsswitch/pam stack
    tasks.modify_nsswitch_pam_stack(sssd=True,
                                    mkhomedir=mkhomedir,
                                    statestore=statestore)

    module.log("SSSD enabled")

    argspec = inspect.getargspec(services.service)
    if len(argspec.args) > 1:
        sssd = services.service('sssd', api)
    else:
        sssd = services.service('sssd')
    try:
        sssd.restart()
    except CalledProcessError:
        module.warn("SSSD service restart was unsuccessful.")

    try:
        sssd.enable()
    except CalledProcessError as e:
        module.warn(
            "Failed to enable automatic startup of the SSSD daemon: "
            "%s", e)

    if configure_openldap_conf(fstore, basedn, servers):
        module.log("Configured /etc/openldap/ldap.conf")
    else:
        module.log("Failed to configure /etc/openldap/ldap.conf")

    # Check that nss is working properly
    if not on_master:
        user = principal
        if user is None or user == "":
            user = "******" % domain
            module.log("Principal is not set when enrolling with OTP"
                       "; using principal '%s' for 'getent passwd'" % user)
        elif '@' not in user:
            user = "******" % (user, domain)
        n = 0
        found = False
        # Loop for up to 10 seconds to see if nss is working properly.
        # It can sometimes take a few seconds to connect to the remote
        # provider.
        # In particular, SSSD might take longer than 6-8 seconds.
        while n < 10 and not found:
            try:
                ipautil.run([paths.GETENT if hasattr(paths, "GETENT") else "getent", "passwd", user])
                found = True
            except Exception as e:
                time.sleep(1)
                n = n + 1

        if not found:
            # module.fail_json() exits immediately, so report the diagnostics
            # and attempt the fallback first, then fail.
            # NOTE: 'conf' is expected to be set earlier in the full module.
            if conf:
                module.log("Recognized configuration: %s" % conf)
            else:
                module.warn("Unable to reliably detect "
                            "configuration. Check NSS setup manually.")

            try:
                hardcode_ldap_server(servers)
            except Exception as e:
                module.warn("Adding hardcoded server name to "
                            "/etc/ldap.conf failed: %s" % str(e))

            module.fail_json(msg="Unable to find '%s' user with 'getent "
                             "passwd %s'!" % (user.split("@")[0], user))

    ##########################################################################

    module.exit_json(changed=True,
                     ca_enabled_ra=ca_enabled)
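
The getent retry loop near the end is a generic wait-for-NSS pattern. A standalone sketch of the same idea, using subprocess instead of the IPA-specific ipautil.run (an assumption made purely for illustration):

import subprocess
import time


def wait_for_nss_user(user, attempts=10, delay=1):
    # Poll 'getent passwd <user>' until it succeeds or the attempts run out.
    for _ in range(attempts):
        if subprocess.call(['getent', 'passwd', user]) == 0:
            return True
        time.sleep(delay)
    return False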
Example #4
def main():
    global results
    global nim_node

    module = AnsibleModule(
        argument_spec=dict(
            action=dict(type='str', required=True,
                        choices=['update', 'master_setup', 'check', 'compare',
                                 'script', 'allocate', 'deallocate',
                                 'bos_inst', 'define_script', 'remove',
                                 'reset', 'reboot', 'maintenance']),
            description=dict(type='str'),
            lpp_source=dict(type='str'),
            targets=dict(type='list', elements='str'),
            asynchronous=dict(type='bool', default=False),
            device=dict(type='str'),
            script=dict(type='str'),
            resource=dict(type='str'),
            location=dict(type='str'),
            group=dict(type='str'),
            force=dict(type='bool', default=False),
            operation=dict(type='str'),
        ),
        required_if=[
            ['action', 'update', ['targets', 'lpp_source']],
            ['action', 'master_setup', ['device']],
            ['action', 'compare', ['targets']],
            ['action', 'script', ['targets', 'script']],
            ['action', 'allocate', ['targets', 'lpp_source']],
            ['action', 'deallocate', ['targets', 'lpp_source']],
            ['action', 'bos_inst', ['targets', 'group']],
            ['action', 'define_script', ['resource', 'location']],
            ['action', 'remove', ['resource']],
            ['action', 'reset', ['targets']],
            ['action', 'reboot', ['targets']],
            ['action', 'maintenance', ['targets']]
        ]
    )

    results = dict(
        changed=False,
        msg='',
        stdout='',
        stderr='',
        nim_output=[],
    )

    module.debug('*** START ***')

    # =========================================================================
    # Get module params
    # =========================================================================
    lpp_source = module.params['lpp_source']
    targets = module.params['targets']
    asynchronous = module.params['asynchronous']
    device = module.params['device']
    script = module.params['script']
    resource = module.params['resource']
    location = module.params['location']
    group = module.params['group']
    force = module.params['force']
    action = module.params['action']
    operation = module.params['operation']

    params = {}

    description = module.params['description']
    if description is None:
        description = "NIM operation: {} request".format(action)
    params['description'] = description

    # =========================================================================
    # Build nim node info
    # =========================================================================
    build_nim_node(module)

    if action == 'update':
        params['targets'] = targets
        params['lpp_source'] = lpp_source
        params['asynchronous'] = asynchronous
        params['force'] = force
        nim_update(module, params)

    elif action == 'maintenance':
        params['targets'] = targets
        params['operation'] = operation
        nim_maintenance(module, params)

    elif action == 'master_setup':
        params['device'] = device
        nim_master_setup(module, params)

    elif action == 'check':
        params['targets'] = targets
        nim_check(module, params)

    elif action == 'compare':
        params['targets'] = targets
        nim_compare(module, params)

    elif action == 'script':
        params['targets'] = targets
        params['script'] = script
        params['asynchronous'] = asynchronous
        nim_script(module, params)

    elif action == 'allocate':
        params['targets'] = targets
        params['lpp_source'] = lpp_source
        nim_allocate(module, params)

    elif action == 'deallocate':
        params['targets'] = targets
        params['lpp_source'] = lpp_source
        nim_deallocate(module, params)

    elif action == 'bos_inst':
        params['targets'] = targets
        params['group'] = group
        params['script'] = script
        nim_bos_inst(module, params)

    elif action == 'define_script':
        params['resource'] = resource
        params['location'] = location
        nim_define_script(module, params)

    elif action == 'remove':
        params['resource'] = resource
        nim_remove(module, params)

    elif action == 'reset':
        params['targets'] = targets
        params['force'] = force
        nim_reset(module, params)

    elif action == 'reboot':
        params['targets'] = targets
        nim_reboot(module, params)

    else:
        results['msg'] = 'NIM - Error: Unknown action {}'.format(action)
        module.fail_json(**results)

    results['nim_node'] = nim_node
    results['msg'] = 'NIM {} completed successfully'.format(action)
    module.exit_json(**results)
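
The if/elif chain above dispatches on action and copies the relevant parameters by hand. A table-driven variant is sketched below; it reuses the nim_* handler names from the example (so it is not standalone) and assumes each handler accepts (module, params):

# Hypothetical table-driven dispatch; the nim_* handlers are the ones
# defined alongside the example above.
ACTIONS = {
    'update': (nim_update, ['targets', 'lpp_source', 'asynchronous', 'force']),
    'maintenance': (nim_maintenance, ['targets', 'operation']),
    'master_setup': (nim_master_setup, ['device']),
    'check': (nim_check, ['targets']),
    'compare': (nim_compare, ['targets']),
    'script': (nim_script, ['targets', 'script', 'asynchronous']),
    'allocate': (nim_allocate, ['targets', 'lpp_source']),
    'deallocate': (nim_deallocate, ['targets', 'lpp_source']),
    'bos_inst': (nim_bos_inst, ['targets', 'group', 'script']),
    'define_script': (nim_define_script, ['resource', 'location']),
    'remove': (nim_remove, ['resource']),
    'reset': (nim_reset, ['targets', 'force']),
    'reboot': (nim_reboot, ['targets']),
}


def dispatch(module, action, params):
    if action not in ACTIONS:
        module.fail_json(msg='NIM - Error: Unknown action {}'.format(action))
    handler, keys = ACTIONS[action]
    for key in keys:
        params[key] = module.params[key]
    handler(module, params)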
Example #5
def main():
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    #   cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    #   cron: name="an old job" state=absent
    #
    # - name: sets env
    #   cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * * 5,2 * * ls -alh > /dev/null
    # # Ansible: do the job
    # * * 5,2 * * /some/dir/job.sh

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str',
                       default='present',
                       choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str',
                              choices=[
                                  "reboot", "yearly", "annually", "monthly",
                                  "weekly", "daily", "hourly"
                              ]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )

    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'

    changed = False
    res_args = dict()
    warnings = list()

    if cron_file:
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append(
                'Filename portion of cron_file ("%s") should consist' %
                cron_file_basename +
                ' solely of upper- and lower-case letters, digits, underscores, and hyphens'
            )

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)

    module.debug('cron instantiated - name: "%s"' % name)

    if not name:
        module.deprecate(
            msg="The 'name' parameter will be required in future releases.",
            version='2.12',
            collection_name='ansible.builtin')
    if reboot:
        module.deprecate(
            msg=
            "The 'reboot' parameter will be removed in future releases. Use 'special_time' option instead.",
            version='2.12',
            collection_name='ansible.builtin')

    if module._diff:
        diff = dict()
        diff['before'] = crontab.n_existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'

    # --- user input validation ---

    if env and not name:
        module.fail_json(
            msg=
            "You must specify 'name' while working with environment variables (env=yes)"
        )

    if (special_time or reboot) and \
       any(x != '*' for x in [minute, hour, day, month, weekday]):
        module.fail_json(
            msg="You must specify time and date fields or special time.")

    # cannot support special_time on solaris
    if (special_time or reboot) and platform.system() == 'SunOS':
        module.fail_json(
            msg="Solaris does not support special_time=... or @reboot")

    if cron_file and do_install:
        if not user:
            module.fail_json(
                msg=
                "To use cron_file=... parameter you must specify user=... as well"
            )

    if job is None and do_install:
        module.fail_json(
            msg="You must specify 'job' to install a new cron job or variable")

    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(
            msg=
            "Insertafter and insertbefore parameters are valid only with env=yes"
        )

    if reboot:
        special_time = "reboot"

    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)

    if crontab.cron_file and not do_install:
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed,
                         cron_file=cron_file,
                         state=state,
                         diff=diff)

    if env:
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)

        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        if do_install:
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break

            job = crontab.get_cron_job(minute, hour, day, month, weekday, job,
                                       special_time, disabled)
            old_job = crontab.find_job(name, job)

            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            if len(old_job) > 2:
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)

            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True

    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.n_existing != '':
        if not (crontab.n_existing.endswith('\r')
                or crontab.n_existing.endswith('\n')):
            changed = True

    res_args = dict(jobs=crontab.get_jobnames(),
                    envs=crontab.get_envnames(),
                    warnings=warnings,
                    changed=changed)

    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff[
                        'after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'

            res_args['diff'] = diff

    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")
Example #6
def main():
    module = AnsibleModule(
        argument_spec=dict(
            servers=dict(required=False, type='list', default=[]),
            domain=dict(required=False),
            realm=dict(required=False),
            hostname=dict(required=False),
            ca_cert_file=dict(required=False),
            on_master=dict(required=False, type='bool', default=False),
            ntp_servers=dict(required=False, type='list', default=[]),
            ntp_pool=dict(required=False),
            no_ntp=dict(required=False, type='bool', default=False),
            #no_nisdomain=dict(required=False, type='bool', default='no'),
            #nisdomain=dict(required=False),
        ),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    options.domain = module.params.get('domain')
    options.servers = module.params.get('servers')
    options.realm = module.params.get('realm')
    options.hostname = module.params.get('hostname')
    options.ca_cert_file = module.params.get('ca_cert_file')
    options.on_master = module.params.get('on_master')
    options.ntp_servers = module.params.get('ntp_servers')
    options.ntp_pool = module.params.get('ntp_pool')
    options.no_ntp = module.params.get('no_ntp')
    options.conf_ntp = not options.no_ntp
    #options.no_nisdomain = module.params.get('no_nisdomain')
    #options.nisdomain = module.params.get('nisdomain')
    #options.ip_addresses
    #options.all_ip_addresses
    #options.enable_dns_updates

    hostname = None
    hostname_source = None
    dnsok = False
    cli_domain = None
    cli_server = None
    cli_realm = None
    cli_kdc = None
    client_domain = None
    cli_basedn = None

    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)

    if options.ntp_servers and options.no_ntp:
        module.fail_json("--ntp-server cannot be used together with --no-ntp")

    if options.ntp_pool and options.no_ntp:
        module.fail_json("--ntp-pool cannot be used together with --no-ntp")

    #if options.no_nisdomain and options.nisdomain:
    #    module.fail_json(
    #        "--no-nisdomain cannot be used together with --nisdomain")

    #if options.ip_addresses:
    #    if options.enable_dns_updates:
    #        module.fail_json(
    #            "--ip-addresses cannot be used together with"
    #            " --enable-dns-updates")

    #    if options.all_ip_addresses:
    #        module.fail_json(
    #            "--ip-address cannot be used together with"
    #            "--all-ip-addresses")

    if options.hostname:
        hostname = options.hostname
        hostname_source = 'Provided as option'
    else:
        hostname = socket.getfqdn()
        hostname_source = "Machine's FQDN"
    if hostname != hostname.lower():
        module.fail_json(msg="Invalid hostname '%s', must be lower-case." %
                         hostname)

    if (hostname == 'localhost') or (hostname == 'localhost.localdomain'):
        module.fail_json(msg="Invalid hostname, '%s' must not be used." %
                         hostname)

    # Get domain from first server if domain is not set, but there are servers
    if options.domain is None and len(options.servers) > 0:
        options.domain = options.servers[0][options.servers[0].find(".") + 1:]

    # Create the discovery instance
    ds = ipadiscovery.IPADiscovery()

    ret = ds.search(domain=options.domain,
                    servers=options.servers,
                    realm=options.realm,
                    hostname=hostname,
                    ca_cert_path=get_cert_path(options.ca_cert_file))

    if options.servers and ret != 0:
        # There is no point in continuing with the installation, as the
        # server list was passed as a fixed list of servers and thus we
        # cannot discover any better result
        module.fail_json(msg="Failed to verify that %s is an IPA Server." % \
                         ', '.join(options.servers))

    if ret == ipadiscovery.BAD_HOST_CONFIG:
        module.fail_json(msg="Can't get the fully qualified name of this host")
    if ret == ipadiscovery.NOT_FQDN:
        module.fail_json(msg="%s is not a fully-qualified hostname" % hostname)
    if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
            or not ds.domain:
        if ret == ipadiscovery.NO_LDAP_SERVER:
            if ds.server:
                module.log("%s is not an LDAP server" % ds.server)
            else:
                module.log("No LDAP server found")
        elif ret == ipadiscovery.NOT_IPA_SERVER:
            if ds.server:
                module.log("%s is not an IPA server" % ds.server)
            else:
                module.log("No IPA server found")
        else:
            module.log("Domain not found")
        if options.domain:
            cli_domain = options.domain
            cli_domain_source = 'Provided as option'
        else:
            module.fail_json(msg="Unable to discover domain, not provided")

        ret = ds.search(domain=cli_domain,
                        servers=options.servers,
                        hostname=hostname,
                        ca_cert_path=get_cert_path(options.ca_cert_file))

    if not cli_domain:
        if ds.domain:
            cli_domain = ds.domain
            cli_domain_source = ds.domain_source
            module.debug("will use discovered domain: %s" % cli_domain)

    client_domain = hostname[hostname.find(".") + 1:]

    if ret in (ipadiscovery.NO_LDAP_SERVER, ipadiscovery.NOT_IPA_SERVER) \
            or not ds.server:
        module.debug("IPA Server not found")
        if options.servers:
            cli_server = options.servers
            cli_server_source = 'Provided as option'
        else:
            module.fail_json(msg="Unable to find IPA Server to join")

        ret = ds.search(domain=cli_domain,
                        servers=cli_server,
                        hostname=hostname,
                        ca_cert_path=get_cert_path(options.ca_cert_file))

    else:
        # Only set dnsok to True if we were not passed in one or more servers
        # and if DNS discovery actually worked.
        if not options.servers:
            (server, domain) = ds.check_domain(ds.domain, set(),
                                               "Validating DNS Discovery")
            if server and domain:
                module.debug("DNS validated, enabling discovery")
                dnsok = True
            else:
                module.debug("DNS discovery failed, disabling discovery")
        else:
            module.debug(
                "Using servers from command line, disabling DNS discovery")

    if not cli_server:
        if options.servers:
            cli_server = ds.servers
            cli_server_source = 'Provided as option'
            module.debug("will use provided server: %s" %
                         ', '.join(options.servers))
        elif ds.server:
            cli_server = ds.servers
            cli_server_source = ds.server_source
            module.debug("will use discovered server: %s" % cli_server[0])

    if ret == ipadiscovery.NOT_IPA_SERVER:
        module.fail_json(msg="%s is not an IPA v2 Server." % cli_server[0])

    if ret == ipadiscovery.NO_ACCESS_TO_LDAP:
        module.warn("Anonymous access to the LDAP server is disabled.")
        ret = 0

    if ret == ipadiscovery.NO_TLS_LDAP:
        module.warn(
            "The LDAP server requires TLS is but we do not have the CA.")
        ret = 0

    if ret != 0:
        module.fail_json(msg="Failed to verify that %s is an IPA Server." %
                         cli_server[0])

    cli_kdc = ds.kdc
    if dnsok and not cli_kdc:
        module.fail_json(msg="DNS domain '%s' is not configured for automatic "
                         "KDC address lookup." % ds.realm.lower())

    if dnsok:
        module.log("Discovery was successful!")

    cli_realm = ds.realm
    cli_realm_source = ds.realm_source
    module.debug("will use discovered realm: %s" % cli_realm)

    if options.realm and options.realm != cli_realm:
        module.fail_json(
            msg=
            "The provided realm name [%s] does not match discovered one [%s]" %
            (options.realm, cli_realm))

    cli_basedn = str(ds.basedn)
    cli_basedn_source = ds.basedn_source
    module.debug("will use discovered basedn: %s" % cli_basedn)

    module.log("Client hostname: %s" % hostname)
    module.debug("Hostname source: %s" % hostname_source)
    module.log("Realm: %s" % cli_realm)
    module.debug("Realm source: %s" % cli_realm_source)
    module.log("DNS Domain: %s" % cli_domain)
    module.debug("DNS Domain source: %s" % cli_domain_source)
    module.log("IPA Server: %s" % ', '.join(cli_server))
    module.debug("IPA Server source: %s" % cli_server_source)
    module.log("BaseDN: %s" % cli_basedn)
    module.debug("BaseDN source: %s" % cli_basedn_source)

    # ipa-join would fail with IP address instead of a FQDN
    for srv in cli_server:
        try:
            socket.inet_pton(socket.AF_INET, srv)
            is_ipaddr = True
        except socket.error:
            try:
                socket.inet_pton(socket.AF_INET6, srv)
                is_ipaddr = True
            except socket.error:
                is_ipaddr = False

        if is_ipaddr:
            module.warn("It seems that you are using an IP address "
                        "instead of FQDN as an argument to --server. The "
                        "installation may fail.")
            break

    ntp_servers = []
    if sync_time is not None:
        if options.conf_ntp:
            # Attempt to configure and sync time with NTP server (chrony).
            sync_time(options, fstore, statestore)
        elif options.on_master:
            # If we're on the master, skip the time sync here because it was
            # already done by ipa-server-install
            logger.info(
                "Skipping attempt to configure and synchronize time with"
                " chrony server as it has been already done on master.")
        else:
            logger.info("Skipping chrony configuration")

    elif not options.on_master and options.conf_ntp:
        # Attempt to sync time with IPA server.
        # If we're skipping NTP configuration, we also skip the time sync here.
        # We assume that NTP servers are discoverable through SRV records
        # in the DNS.
        # If that fails, we try to sync directly with IPA server,
        # assuming it runs NTP
        if len(options.ntp_servers) < 1:
            # Detect NTP servers
            ds = ipadiscovery.IPADiscovery()
            ntp_servers = ds.ipadns_search_srv(cli_domain,
                                               '_ntp._udp',
                                               None,
                                               break_on_first=False)
        else:
            ntp_servers = options.ntp_servers

        # Attempt to sync time:
        # At first with given or discovered time servers. If no ntp
        # servers have been given or discovered, then with the ipa
        # server.
        module.log('Synchronizing time ...')
        synced_ntp = False
        # use user specified NTP servers if there are any
        for s in ntp_servers:
            synced_ntp = timeconf.synconce_ntp(s, False)
            if synced_ntp:
                break
        if not synced_ntp and not ntp_servers:
            synced_ntp = timeconf.synconce_ntp(cli_server[0], False)
        if not synced_ntp:
            module.warn("Unable to sync time with NTP server")

    # Check if ipa client is already configured
    if is_client_configured():
        # Check that realm and domain match
        current_config = get_ipa_conf()
        if cli_domain != current_config.get('domain'):
            return module.fail_json(msg="IPA client already installed "
                                    "with a conflicting domain")
        if cli_realm != current_config.get('realm'):
            return module.fail_json(msg="IPA client already installed "
                                    "with a conflicting realm")

    # Done
    module.exit_json(changed=True,
                     servers=cli_server,
                     domain=cli_domain,
                     realm=cli_realm,
                     kdc=cli_kdc,
                     basedn=cli_basedn,
                     hostname=hostname,
                     client_domain=client_domain,
                     dnsok=dnsok,
                     ntp_servers=ntp_servers,
                     ipa_python_version=IPA_PYTHON_VERSION)
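
The per-server IPv4/IPv6 detection loop near the end can be factored into a small helper; a sketch using the same socket.inet_pton calls:

import socket


def is_ip_address(value):
    # True if value parses as an IPv4 or IPv6 address.
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, value)
            return True
        except socket.error:
            pass
    return False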
Example #7
def main():
    module = AnsibleModule(argument_spec=dict(
        log=dict(required=False,
                 default='INFO',
                 choices=['DEBUG', 'INFO', 'ERROR', 'CRITICAL']),
        appliance=dict(required=True),
        lmi_port=dict(required=False, default=443, type='int'),
        action=dict(required=True),
        force=dict(required=False, default=False, type='bool'),
        username=dict(required=False),
        password=dict(required=True),
        isdsapi=dict(required=False, type='dict'),
        adminProxyProtocol=dict(required=False,
                                default='https',
                                choices=['http', 'https']),
        adminProxyHostname=dict(required=False),
        adminProxyPort=dict(required=False, default=443, type='int'),
        adminProxyApplianceShortName=dict(required=False,
                                          default=False,
                                          type='bool'),
        omitAdminProxy=dict(required=False, default=False, type='bool')),
                           supports_check_mode=True)

    module.debug('Started isds module')

    # Process all Arguments
    logLevel = module.params['log']
    force = module.params['force']
    action = module.params['action']
    appliance = module.params['appliance']
    lmi_port = module.params['lmi_port']
    username = module.params['username']
    password = module.params['password']
    adminProxyProtocol = module.params['adminProxyProtocol']
    adminProxyHostname = module.params['adminProxyHostname']
    adminProxyPort = module.params['adminProxyPort']
    adminProxyApplianceShortName = module.params[
        'adminProxyApplianceShortName']
    omitAdminProxy = module.params['omitAdminProxy']

    # Setup logging for format, set log level and redirect to string
    strlog = StringIO()
    DEFAULT_LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format':
                '[%(asctime)s] [PID:%(process)d TID:%(thread)d] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] %(message)s'
            },
        },
        'handlers': {
            'default': {
                'level': logLevel,
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': strlog
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': logLevel,
                'propagate': True
            },
            'requests.packages.urllib3.connectionpool': {
                'handlers': ['default'],
                'level': 'ERROR',
                'propagate': True
            }
        }
    }
    logging.config.dictConfig(DEFAULT_LOGGING)

    # Create appliance object to be used for all calls
    if username == '' or username is None:
        u = ApplianceUser(password=password)
    else:
        u = ApplianceUser(username=username, password=password)

    # Create appliance object to be used for all calls
    # if adminProxy hostname is set, use the ISDSApplianceAdminProxy
    if adminProxyHostname == '' or adminProxyHostname is None or omitAdminProxy:
        isds_server = ISDSAppliance(hostname=appliance,
                                    user=u,
                                    lmi_port=lmi_port)
    else:
        isds_server = ISDSApplianceAdminProxy(
            adminProxyHostname=adminProxyHostname,
            user=u,
            hostname=appliance,
            adminProxyProtocol=adminProxyProtocol,
            adminProxyPort=adminProxyPort,
            adminProxyApplianceShortName=adminProxyApplianceShortName)

    # Create options string to pass to action method
    options = 'isdsAppliance=isds_server, force=' + str(force)
    if module.check_mode is True:
        options = options + ', check_mode=True'
    if isinstance(module.params['isdsapi'], dict):
        for key, value in module.params['isdsapi'].iteritems():
            if isinstance(value, basestring):
                options = options + ', ' + key + '="' + value + '"'
            else:
                options = options + ', ' + key + '=' + str(value)
    module.debug('Option to be passed to action: ' + options)

    # Dynamically process the action to be invoked
    # Simple check to restrict calls to just "isds" ones for safety
    if action.startswith('ibmsecurity.isds.'):
        try:
            module_name, method_name = action.rsplit('.', 1)
            module.debug('Action method to be imported from module: ' +
                         module_name)
            module.debug('Action method name is: ' + method_name)
            mod = importlib.import_module(module_name)
            func_ptr = getattr(
                mod, method_name)  # Convert action to actual function pointer
            func_call = 'func_ptr(' + options + ')'

            startd = datetime.datetime.now()

            # Execute requested 'action'
            ret_obj = eval(func_call)

            endd = datetime.datetime.now()
            delta = endd - startd

            ret_obj['stdout'] = strlog.getvalue()
            ret_obj['stdout_lines'] = strlog.getvalue().split()
            ret_obj['start'] = str(startd)
            ret_obj['end'] = str(endd)
            ret_obj['delta'] = str(delta)
            ret_obj['cmd'] = action + "(" + options + ")"
            ret_obj['ansible_facts'] = isds_server.facts

            module.exit_json(**ret_obj)

        except ImportError:
            module.fail_json(
                name=action,
                msg='Error> action belongs to a module that is not found!',
                log=strlog.getvalue())
        except AttributeError:
            module.fail_json(
                name=action,
                msg=
                'Error> invalid action was specified, method not found in module!',
                log=strlog.getvalue())
        except TypeError:
            module.fail_json(
                name=action,
                msg=
                'Error> action does not have the right set of arguments or there is a code bug! Options: '
                + options,
                log=strlog.getvalue())
        except IBMError as e:
            module.fail_json(name=action, msg=str(e), log=strlog.getvalue())
    else:
        module.fail_json(
            name=action,
            msg='Error> invalid action specified, needs to be isds!',
            log=strlog.getvalue())
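
The example builds a Python expression string and eval()s it to call the requested action. The same dynamic dispatch can be done with keyword arguments, which avoids quoting issues; a sketch reusing the importlib/getattr part of the example (the parameter names isdsAppliance, force, and check_mode come from the options string above):

import importlib


def call_isds_action(action, isds_server, force, check_mode, extra_params=None):
    # Resolve 'ibmsecurity.isds.<module>.<method>' and call it with kwargs.
    if not action.startswith('ibmsecurity.isds.'):
        raise ValueError("action must live under ibmsecurity.isds")
    module_name, method_name = action.rsplit('.', 1)
    func = getattr(importlib.import_module(module_name), method_name)
    kwargs = dict(isdsAppliance=isds_server, force=force)
    if check_mode:
        kwargs['check_mode'] = True
    kwargs.update(extra_params or {})
    return func(**kwargs)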
Example #8
def main():
    # The following example playbooks:
    #
    # - cronvar: name="SHELL" value="/bin/bash"
    #
    # - name: Set the email
    #   cronvar: name="EMAILTO" value="*****@*****.**"
    #
    # - name: Get rid of the old new host variable
    #   cronvar: name="NEW_HOST" state=absent
    #
    # Would produce:
    # SHELL = /bin/bash
    # EMAILTO = [email protected]

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            value=dict(type='str'),
            user=dict(type='str'),
            cron_file=dict(type='str'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            state=dict(type='str', default='present', choices=['absent', 'present']),
            backup=dict(type='bool', default=False),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---

    if name is None and ensure_present:
        module.fail_json(msg="You must specify 'name' to insert a new cron variabale")

    if value is None and ensure_present:
        module.fail_json(msg="You must specify 'value' to insert a new cron variable")

    if name is None and not ensure_present:
        module.fail_json(msg="You must specify 'name' to remove a cron variable")

    # if requested make a backup before making a change
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {
        "vars": cronvar.get_var_names(),
        "changed": changed
    }

    if changed:
        cronvar.write()

    # retain the backup only if crontab or cron file have changed
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)
Example #9
def main():
    module = AnsibleModule(argument_spec=dict(
        name=dict(type='list', elements='str', required=True),
        state=dict(
            type='str',
            default='present',
            choices=['absent', 'installed', 'latest', 'present', 'removed']),
        build=dict(type='bool', default=False),
        snapshot=dict(type='bool', default=False),
        ports_dir=dict(type='path', default='/usr/ports'),
        quick=dict(type='bool', default=False),
        clean=dict(type='bool', default=False),
    ),
                           mutually_exclusive=[['snapshot', 'build']],
                           supports_check_mode=True)

    name = module.params['name']
    state = module.params['state']
    build = module.params['build']
    ports_dir = module.params['ports_dir']

    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state
    result['build'] = build

    # The data structure used to keep track of package information.
    pkg_spec = {}

    if build is True:
        if not os.path.isdir(ports_dir):
            module.fail_json(
                msg="the ports source directory %s does not exist" %
                (ports_dir))

        # build sqlports if its not installed yet
        parse_package_name(['sqlports'], pkg_spec, module)
        get_package_state(['sqlports'], pkg_spec, module)
        if not pkg_spec['sqlports']['installed_state']:
            module.debug("main(): installing 'sqlports' because build=%s" %
                         module.params['build'])
            package_present(['sqlports'], pkg_spec, module)

    asterisk_name = False
    for n in name:
        if n == '*':
            if len(name) != 1:
                module.fail_json(
                    msg="the package name '*' can not be mixed with other names"
                )

            asterisk_name = True

    if asterisk_name:
        if state != 'latest':
            module.fail_json(
                msg="the package name '*' is only valid when using state=latest"
            )
        else:
            # Perform an upgrade of all installed packages.
            upgrade_packages(pkg_spec, module)
    else:
        # Parse package names and put results in the pkg_spec dictionary.
        parse_package_name(name, pkg_spec, module)

        # Not sure how the branch syntax is supposed to play together
        # with build mode. Disable it for now.
        for n in name:
            if pkg_spec[n]['branch'] and module.params['build'] is True:
                module.fail_json(
                    msg=
                    "the combination of 'branch' syntax and build=%s is not supported: %s"
                    % (module.params['build'], n))

        # Get state for all package names.
        get_package_state(name, pkg_spec, module)

        # Perform requested action.
        if state in ['installed', 'present']:
            package_present(name, pkg_spec, module)
        elif state in ['absent', 'removed']:
            package_absent(name, pkg_spec, module)
        elif state == 'latest':
            package_latest(name, pkg_spec, module)

    # The combined changed status for all requested packages. If anything
    # is changed this is set to True.
    combined_changed = False

    # The combined failed status for all requested packages. If anything
    # failed this is set to True.
    combined_failed = False

    # We combine all error messages in this comma separated string, for example:
    # "msg": "Can't find nmapp\n, Can't find nmappp\n"
    combined_error_message = ''

    # Loop over all requested package names and check if anything failed or
    # changed.
    for n in name:
        if pkg_spec[n]['rc'] != 0:
            combined_failed = True
            if pkg_spec[n]['stderr']:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stderr']
                else:
                    combined_error_message = pkg_spec[n]['stderr']
            else:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stdout']
                else:
                    combined_error_message = pkg_spec[n]['stdout']

        if pkg_spec[n]['changed'] is True:
            combined_changed = True

    # If combined_error_message contains anything at least some part of the
    # list of requested package names failed.
    if combined_failed:
        module.fail_json(msg=combined_error_message, **result)

    result['changed'] = combined_changed

    module.exit_json(**result)
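
The error-aggregation loop collects the stderr (or stdout) of every failed package into one comma-separated message; the same idea, written as a small helper with a single join:

def combine_error_messages(name, pkg_spec):
    # One message per failed package, joined with ", " as in the example.
    messages = []
    for n in name:
        if pkg_spec[n]['rc'] != 0:
            messages.append(pkg_spec[n]['stderr'] or pkg_spec[n]['stdout'])
    return ', '.join(messages)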
Example #10
def main():
    # The following example playbooks:
    #
    # - cronvar: name="SHELL" value="/bin/bash"
    #
    # - name: Set the email
    #   cronvar: name="EMAILTO" value="*****@*****.**"
    #
    # - name: Get rid of the old new host variable
    #   cronvar: name="NEW_HOST" state=absent
    #
    # Would produce:
    # SHELL = /bin/bash
    # EMAILTO = [email protected]

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            value=dict(type='str'),
            user=dict(type='str'),
            cron_file=dict(type='str'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
            state=dict(type='str',
                       default='present',
                       choices=['absent', 'present']),
            backup=dict(type='bool', default=False),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        supports_check_mode=False,
    )

    name = module.params['name']
    value = module.params['value']
    user = module.params['user']
    cron_file = module.params['cron_file']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    state = module.params['state']
    backup = module.params['backup']
    ensure_present = state == 'present'

    changed = False
    res_args = dict()

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    cronvar = CronVar(module, user, cron_file)

    module.debug('cronvar instantiated - name: "%s"' % name)

    # --- user input validation ---

    if name is None and ensure_present:
        module.fail_json(
            msg="You must specify 'name' to insert a new cron variable")

    if value is None and ensure_present:
        module.fail_json(
            msg="You must specify 'value' to insert a new cron variable")

    if name is None and not ensure_present:
        module.fail_json(
            msg="You must specify 'name' to remove a cron variable")

    # if requested make a backup before making a change
    if backup:
        (_, backup_file) = tempfile.mkstemp(prefix='cronvar')
        cronvar.write(backup_file)

    if cronvar.cron_file and not name and not ensure_present:
        changed = cronvar.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state)

    old_value = cronvar.find_variable(name)

    if ensure_present:
        if old_value is None:
            cronvar.add_variable(name, value, insertbefore, insertafter)
            changed = True
        elif old_value != value:
            cronvar.update_variable(name, value)
            changed = True
    else:
        if old_value is not None:
            cronvar.remove_variable(name)
            changed = True

    res_args = {"vars": cronvar.get_var_names(), "changed": changed}

    if changed:
        cronvar.write()

    # retain the backup only if crontab or cron file have changed
    if backup:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)
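
Examples 8 and 10 are two formattings of the same cronvar module and share one ensure-present/ensure-absent core. Stripped of the CronVar specifics, that idempotency pattern looks roughly like this (get/add/update/remove stand in for the real CronVar methods, whose signatures differ slightly):

def ensure_variable(store, name, value, present):
    # Generic ensure pattern: returns True when the store was modified.
    old_value = store.find_variable(name)
    if present:
        if old_value is None:
            store.add_variable(name, value)
            return True
        if old_value != value:
            store.update_variable(name, value)
            return True
        return False
    if old_value is not None:
        store.remove_variable(name)
        return True
    return False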
Example #11
def main():
    module = AnsibleModule(argument_spec=dict(
        log=dict(default='INFO',
                 choices=['DEBUG', 'INFO', 'ERROR', 'CRITICAL']),
        appliance=dict(required=True),
        lmi_port=dict(required=False, default=443, type='int'),
        username=dict(required=False),
        password=dict(required=True),
        isamuser=dict(required=False),
        isampwd=dict(required=True),
        isamdomain=dict(required=False, default='Default'),
        commands=dict(required=True, type='list'),
        adminProxyProtocol=dict(required=False,
                                default='https',
                                choices=['http', 'https']),
        adminProxyHostname=dict(required=False),
        adminProxyPort=dict(required=False, default=443, type='int'),
        adminProxyApplianceShortName=dict(required=False,
                                          default=False,
                                          type='bool'),
        omitAdminProxy=dict(required=False, default=False, type='bool')),
                           supports_check_mode=False)

    module.debug('Started isamadmin module')

    # Process all Arguments
    logLevel = module.params['log']
    appliance = module.params['appliance']
    lmi_port = module.params['lmi_port']
    username = module.params['username']
    password = module.params['password']
    isamuser = module.params['isamuser']
    isampwd = module.params['isampwd']
    isamdomain = module.params['isamdomain']
    commands = module.params['commands']
    adminProxyProtocol = module.params['adminProxyProtocol']
    adminProxyHostname = module.params['adminProxyHostname']
    adminProxyPort = module.params['adminProxyPort']
    adminProxyApplianceShortName = module.params['adminProxyApplianceShortName']
    omitAdminProxy = module.params['omitAdminProxy']

    # Setup logging for format, set log level and redirect to string
    strlog = StringIO()
    DEFAULT_LOGGING = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format':
                '[%(asctime)s] [PID:%(process)d TID:%(thread)d] [%(levelname)s] [%(name)s] [%(funcName)s():%(lineno)s] %(message)s'
            },
        },
        'handlers': {
            'default': {
                'level': logLevel,
                'formatter': 'standard',
                'class': 'logging.StreamHandler',
                'stream': strlog
            },
        },
        'loggers': {
            '': {
                'handlers': ['default'],
                'level': logLevel,
                'propagate': True
            },
            'requests.packages.urllib3.connectionpool': {
                'handlers': ['default'],
                'level': 'ERROR',
                'propagate': True
            }
        }
    }
    logging.config.dictConfig(DEFAULT_LOGGING)

    # Build the LMI user object used to authenticate all appliance calls
    if username == '' or username is None:
        u = ApplianceUser(password=password)
    else:
        u = ApplianceUser(username=username, password=password)

    # Create the appliance object used for all calls; if an adminProxy
    # hostname is set (and not omitted), go through the ISAMApplianceAdminProxy
    if adminProxyHostname == '' or adminProxyHostname is None or omitAdminProxy:
        isam_server = ISAMAppliance(hostname=appliance,
                                    user=u,
                                    lmi_port=lmi_port)
    else:
        isam_server = ISAMApplianceAdminProxy(
            adminProxyHostname=adminProxyHostname,
            user=u,
            hostname=appliance,
            adminProxyProtocol=adminProxyProtocol,
            adminProxyPort=adminProxyPort,
            adminProxyApplianceShortName=adminProxyApplianceShortName)

    if isamuser == '' or isamuser is None:
        iu = ISAMUser(password=isampwd)
    else:
        iu = ISAMUser(username=isamuser, password=isampwd)

    try:
        import ibmsecurity.isam.web.runtime.pdadmin

        startd = datetime.datetime.now()

        ret_obj = ibmsecurity.isam.web.runtime.pdadmin.execute(
            isamAppliance=isam_server,
            isamUser=iu,
            admin_domain=isamdomain,
            commands=commands)

        endd = datetime.datetime.now()
        delta = endd - startd

        ret_obj['stdout'] = strlog.getvalue()
        ret_obj['stdout_lines'] = strlog.getvalue().splitlines()
        ret_obj['start'] = str(startd)
        ret_obj['end'] = str(endd)
        ret_obj['delta'] = str(delta)
        ret_obj['cmd'] = ("ibmsecurity.isam.web.runtime.pdadmin.execute("
                          "isamAppliance=isam_server, isamUser=iu, "
                          "admin_domain=isamdomain, commands=" + str(commands) + ")")
        ret_obj['ansible_facts'] = isam_server.facts

        module.exit_json(**ret_obj)

    except ImportError:
        module.fail_json(name='pdadmin',
                         msg='Error> Unable to import pdadmin module!',
                         log=strlog.getvalue())
    except AttributeError:
        module.fail_json(
            name='pdadmin',
            msg='Error> Error finding execute function of pdadmin module',
            log=strlog.getvalue())
    except TypeError:
        module.fail_json(
            name='pdadmin',
            msg='Error> pdadmin has the wrong set of arguments or there is a bug in the code!',
            log=strlog.getvalue())
    except IBMError as e:
        module.fail_json(name='pdadmin', msg=str(e), log=strlog.getvalue())
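
The pattern worth noting above is that all library logging is redirected into an in-memory StringIO buffer, which is then returned to the caller as ret_obj['stdout'] and surfaced in the failure paths via log=strlog.getvalue(). A minimal, self-contained sketch of that capture technique (names here are illustrative, not taken from the module):

import logging
import logging.config
from io import StringIO

def make_memory_log_buffer(level='INFO'):
    # Sketch: route the root logger into an in-memory buffer so the captured
    # text can be returned in a module result instead of going to stderr.
    buffer = StringIO()
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'handlers': {
            'memory': {
                'class': 'logging.StreamHandler',
                'level': level,
                'stream': buffer,
            },
        },
        'root': {'handlers': ['memory'], 'level': level},
    })
    return buffer

buf = make_memory_log_buffer('DEBUG')
logging.getLogger('example').debug('captured in memory')
print(buf.getvalue())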
Ejemplo n.º 12
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            jid=dict(type='str'),
            alias=dict(type='str'),
            signal=dict(type='int', default=signal.SIGTERM),
            # passed in from the async_status action plugin
            _async_dir=dict(type='path', required=True),
        ),
        required_one_of=[['jid', 'alias']],
        mutually_exclusive=[['jid', 'alias']])

    request = Request(module)
    response = Response(request)

    t = Tracker(module, response)
    data = t.load_job_state(request.log_path)
    module.debug("current state %s" % data)
    t.update_state(data)

    if response.finished:
        module.debug("job is finished. nothing to kill.")
        module.exit_json(**response.__dict__)

    if response.killed:
        module.debug("job has been killed previously. nothing to kill.")
        module.exit_json(**response.__dict__)

    pid = t.get_jid_pid(request.jid)
    alive = [pid]
    if not t.is_pid_alive(pid):
        alive = find_pid(request.jid)
        if not alive:
            module.debug(
                "job is unfinished and no wrapper alive. nothing to kill.")
            module.exit_json(**response.__dict__)

    # kill
    module.debug("alive pids = %s" % alive)

    data['killed'] = True
    prepare_state_update(request.log_path, data, "kill")

    for pid in alive:
        module.debug("killing group pid = %d, sig = %d" %
                     (pid, request.signal))
        t.kill_all(pid, request.signal)

    commit_state_update(request.log_path, "kill")
    # cleanup running wrapper marker
    try_cleanup_state_update(request.log_path, "tmp")
    response.killed = True

    module.exit_json(**response.__dict__)
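
The killed flag is persisted through a prepare/commit pair (prepare_state_update, commit_state_update) plus a cleanup step, which suggests a write-then-rename scheme so a concurrent reader of the job state never observes a half-written file. Those helpers are not shown here; a hedged sketch of the general idea, using hypothetical names and plain JSON:

import json
import os

def prepare_state(log_path, data, suffix):
    # Sketch: stage the new state in a side file next to the real one.
    staged = '%s.%s' % (log_path, suffix)
    with open(staged, 'w') as handle:
        json.dump(data, handle)
    return staged

def commit_state(log_path, suffix):
    # Sketch: swap the staged file into place; rename is atomic on POSIX
    # when both paths live on the same filesystem.
    os.rename('%s.%s' % (log_path, suffix), log_path)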
Ejemplo n.º 13
0
def main():
    global module
    global results
    global suma_params

    module = AnsibleModule(argument_spec=dict(
        action=dict(required=False,
                    choices=[
                        'download', 'preview', 'list', 'edit', 'run',
                        'unschedule', 'delete', 'config', 'default'
                    ],
                    type='str',
                    default='preview'),
        oslevel=dict(required=False, type='str', default='Latest'),
        last_sp=dict(required=False, type='bool', default=False),
        extend_fs=dict(required=False, type='bool', default=True),
        download_dir=dict(required=False,
                          type='path',
                          default='/usr/sys/inst.images'),
        download_only=dict(required=False, type='bool', default=False),
        save_task=dict(required=False, type='bool', default=False),
        task_id=dict(required=False, type='str'),
        sched_time=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        metadata_dir=dict(required=False,
                          type='path',
                          default='/var/adm/ansible/metadata'),
    ),
                           required_if=[
                               ['action', 'edit', ['task_id']],
                               ['action', 'delete', ['task_id']],
                               ['action', 'run', ['task_id']],
                               ['action', 'download', ['oslevel']],
                               ['action', 'preview', ['oslevel']],
                               ['action', 'unschedule', ['task_id']],
                           ],
                           supports_check_mode=True)

    results = dict(
        changed=False,
        msg='',
        stdout='',
        stderr='',
        meta={'messages': []},
    )

    module.debug('*** START ***')
    module.run_command_environ_update = dict(LANG='C',
                                             LC_ALL='C',
                                             LC_MESSAGES='C',
                                             LC_CTYPE='C')

    action = module.params['action']

    # switch action
    if action == 'list':
        suma_params['task_id'] = module.params['task_id']
        suma_list()

    elif action == 'edit':
        suma_params['task_id'] = module.params['task_id']
        suma_params['sched_time'] = module.params['sched_time']
        suma_edit()

    elif action == 'unschedule':
        suma_params['task_id'] = module.params['task_id']
        suma_unschedule()

    elif action == 'delete':
        suma_params['task_id'] = module.params['task_id']
        suma_delete()

    elif action == 'run':
        suma_params['task_id'] = module.params['task_id']
        suma_run()

    elif action == 'config':
        suma_config()

    elif action == 'default':
        suma_default()

    elif action == 'download' or action == 'preview':
        suma_params['oslevel'] = module.params['oslevel']
        suma_params['download_dir'] = module.params['download_dir']
        suma_params['metadata_dir'] = module.params['metadata_dir']
        suma_params['download_only'] = module.params['download_only']
        suma_params['save_task'] = module.params['save_task']
        suma_params['last_sp'] = module.params['last_sp']
        suma_params['extend_fs'] = module.params['extend_fs']
        if module.params['description']:
            suma_params['description'] = module.params['description']
        else:
            suma_params['description'] = "{} request for oslevel {}".format(
                action, module.params['oslevel'])

        suma_params['action'] = action
        suma_download()

    # Exit
    msg = 'Suma {} completed successfully'.format(action)
    module.log(msg)
    results['msg'] = msg
    module.exit_json(**results)
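
Setting module.run_command_environ_update up front means every subsequent module.run_command() call runs suma with the C locale, so its output is parsed in a predictable, untranslated form. A standalone sketch of the same effect with subprocess (illustrative only, not the module's own wrapper):

import os
import subprocess

def run_in_c_locale(cmd):
    # Sketch: force an untranslated locale before parsing command output.
    env = dict(os.environ, LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
    proc = subprocess.run(cmd, capture_output=True, text=True, env=env)
    return proc.returncode, proc.stdout, proc.stderr

# e.g. run_in_c_locale(['date']) yields English month and day names.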
Ejemplo n.º 14
0
class OVHModuleBase(object):
    def __init__(self,
                 derived_arg_spec,
                 bypass_checks=False,
                 no_log=False,
                 check_invalid_arguments=None,
                 mutually_exclusive=None,
                 required_together=None,
                 required_one_of=None,
                 add_file_common_args=False,
                 supports_check_mode=False,
                 required_if=None,
                 facts_module=False,
                 skip_exec=False):

        merged_arg_spec = dict()

        merged_arg_spec.update(COMMON_ARGS)

        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_if=required_if,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode)

        self.check_mode = self.module.check_mode

        if not HAS_OVH:
            self.fail(msg=missing_required_lib(
                'ovh (ovh >= {0})'.format(OVH_MIN_RELEASE)),
                      exception=HAS_OVH_EXC)

        self.facts_module = facts_module

        self.init_results()

        if not skip_exec:
            self.exec_module(**self.module.params)

        self.module.exit_json(**self.results)

    def __getattribute__(self, attribute: str) -> 'Any':
        try:
            return super().__getattribute__(attribute)
        except AttributeError:
            return self.module.params.get(attribute)

    def init_results(self):
        self.results = dict(changed=False)

    def exec_module(self, **kwargs):
        self.fail("Error: {0} failed to implement exec_module method.".format(
            self.__class__.__name__))

    def set_changed(self, changed: bool):
        self.results['changed'] = changed

    def fail(self, msg, **kwargs):
        '''
        Shortcut for calling module.fail()
        :param msg: Error message text.
        :param kwargs: Any key=value pairs
        :return: None
        '''
        self.module.fail_json(msg=msg, **kwargs)

    def log(self, msg, log_args: 'Optional[Dict[str, Any]]' = None):
        self.module.log(msg, log_args)

    def debug(self, msg):
        self.module.debug(msg)

    @property
    def client(self) -> ovh.Client:
        return self.delegated_client(self.consumer_key)

    def delegated_client(self,
                         consumer_key: 'Optional[str]' = None) -> ovh.Client:
        return ovh.Client(endpoint=self.endpoint,
                          application_key=self.application_key,
                          application_secret=self.application_secret,
                          consumer_key=consumer_key)
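
A concrete module built on OVHModuleBase only has to declare its extra options and override exec_module(); any declared parameter is then reachable as an attribute through the __getattribute__ fallback. A hedged sketch of such a subclass, assuming COMMON_ARGS carries the usual endpoint/application_key/application_secret/consumer_key options; the class name, option, and API path below are illustrative only:

class OVHDomainZoneInfo(OVHModuleBase):
    # Hypothetical facts-style module built on the base class above.

    def __init__(self):
        spec = dict(
            zone=dict(type='str', required=True),
        )
        super().__init__(derived_arg_spec=spec,
                         supports_check_mode=True,
                         facts_module=True)

    def exec_module(self, **kwargs):
        # self.zone falls through __getattribute__ to self.module.params['zone'].
        records = self.client.get('/domain/zone/{0}/record'.format(self.zone))
        self.results['records'] = records
        return self.results

def main():
    # Instantiating the class runs exec_module() and exits the module.
    OVHDomainZoneInfo()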
Ejemplo n.º 15
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', required=True),
            state=dict(type='str', default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']),
            build=dict(type='bool', default=False),
            ports_dir=dict(type='path', default='/usr/ports'),
            quick=dict(type='bool', default=False),
            clean=dict(type='bool', default=False),
        ),
        supports_check_mode=True
    )

    name = module.params['name']
    state = module.params['state']
    build = module.params['build']
    ports_dir = module.params['ports_dir']

    rc = 0
    stdout = ''
    stderr = ''
    result = {}
    result['name'] = name
    result['state'] = state
    result['build'] = build

    # The data structure used to keep track of package information.
    pkg_spec = {}

    if build is True:
        if not os.path.isdir(ports_dir):
            module.fail_json(msg="the ports source directory %s does not exist" % (ports_dir))

        # install sqlports if it's not already installed
        parse_package_name(['sqlports'], pkg_spec, module)
        get_package_state(['sqlports'], pkg_spec, module)
        if not pkg_spec['sqlports']['installed_state']:
            module.debug("main(): installing 'sqlports' because build=%s" % module.params['build'])
            package_present(['sqlports'], pkg_spec, module)

    asterisk_name = False
    for n in name:
        if n == '*':
            if len(name) != 1:
                module.fail_json(msg="the package name '*' can not be mixed with other names")

            asterisk_name = True

    if asterisk_name:
        if state != 'latest':
            module.fail_json(msg="the package name '*' is only valid when using state=latest")
        else:
            # Perform an upgrade of all installed packages.
            upgrade_packages(pkg_spec, module)
    else:
        # Parse package names and put results in the pkg_spec dictionary.
        parse_package_name(name, pkg_spec, module)

        # Not sure how the branch syntax is supposed to play together
        # with build mode. Disable it for now.
        for n in name:
            if pkg_spec[n]['branch'] and module.params['build'] is True:
                module.fail_json(msg="the combination of 'branch' syntax and build=%s is not supported: %s" % (module.params['build'], n))

        # Get state for all package names.
        get_package_state(name, pkg_spec, module)

        # Perform requested action.
        if state in ['installed', 'present']:
            package_present(name, pkg_spec, module)
        elif state in ['absent', 'removed']:
            package_absent(name, pkg_spec, module)
        elif state == 'latest':
            package_latest(name, pkg_spec, module)

    # The combined changed status for all requested packages. If anything
    # is changed this is set to True.
    combined_changed = False

    # The combined failed status for all requested packages. If anything
    # failed this is set to True.
    combined_failed = False

    # We combine all error messages in this comma separated string, for example:
    # "msg": "Can't find nmapp\n, Can't find nmappp\n"
    combined_error_message = ''

    # Loop over all requested package names and check if anything failed or
    # changed.
    for n in name:
        if pkg_spec[n]['rc'] != 0:
            combined_failed = True
            if pkg_spec[n]['stderr']:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stderr']
                else:
                    combined_error_message = pkg_spec[n]['stderr']
            else:
                if combined_error_message:
                    combined_error_message += ", %s" % pkg_spec[n]['stdout']
                else:
                    combined_error_message = pkg_spec[n]['stdout']

        if pkg_spec[n]['changed'] is True:
            combined_changed = True

    # If combined_error_message contains anything at least some part of the
    # list of requested package names failed.
    if combined_failed:
        module.fail_json(msg=combined_error_message, **result)

    result['changed'] = combined_changed

    module.exit_json(**result)
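
The aggregation loop above implies a per-package record in pkg_spec filled in by parse_package_name(), get_package_state() and the package_* actions. The field names below are inferred from how the loop reads them, not from the helpers themselves; purely illustrative:

# Illustrative only: one pkg_spec entry as the final loop above consumes it.
pkg_spec_example = {
    'nmap': {
        'branch': None,            # branch syntax detected by parse_package_name()
        'installed_state': True,   # filled in by get_package_state()
        'rc': 0,                   # exit status of the last pkg_add/pkg_delete call
        'stdout': '',
        'stderr': '',
        'changed': False,
    },
}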
Ejemplo n.º 16
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            name=dict(type='str', required=True),
            gid=dict(type='int'),
            system=dict(type='bool', default=False),
            local=dict(type='bool', default=False)
        ),
        supports_check_mode=True,
    )

    group = Group(module)

    module.debug('Group instantiated - platform %s' % group.platform)
    if group.distribution:
        module.debug('Group instantiated - distribution %s' % group.distribution)

    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = group.name
    result['state'] = group.state

    if group.state == 'absent':

        if group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_del()
            if rc != 0:
                module.fail_json(name=group.name, msg=err)

    elif group.state == 'present':

        if not group.group_exists():
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = group.group_add(gid=group.gid, system=group.system)
        else:
            (rc, out, err) = group.group_mod(gid=group.gid)

        if rc is not None and rc != 0:
            module.fail_json(name=group.name, msg=err)

    if rc is None:
        result['changed'] = False
    else:
        result['changed'] = True
    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    if group.group_exists():
        info = group.group_info()
        result['system'] = group.system
        result['gid'] = info[2]

    module.exit_json(**result)
Ejemplo n.º 17
0
def main():
    module = AnsibleModule(
        argument_spec=dict(name=dict(required=True),
                           port=dict(default=623, type='int'),
                           user=dict(required=True, no_log=True),
                           password=dict(required=True, no_log=True),
                           state=dict(default='present',
                                      choices=['present', 'absent']),
                           bootdev=dict(required=True,
                                        choices=[
                                            'network', 'hd', 'safe', 'optical',
                                            'setup', 'default'
                                        ]),
                           persistent=dict(default=False, type='bool'),
                           uefiboot=dict(default=False, type='bool')),
        supports_check_mode=True,
    )

    if command is None:
        module.fail_json(msg='the python pyghmi module is required')

    name = module.params['name']
    port = module.params['port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    bootdev = module.params['bootdev']
    persistent = module.params['persistent']
    uefiboot = module.params['uefiboot']
    request = dict()

    if state == 'absent' and bootdev == 'default':
        module.fail_json(
            msg="The bootdev 'default' cannot be used with state 'absent'.")

    # --- run command ---
    try:
        ipmi_cmd = command.Command(bmc=name,
                                   userid=user,
                                   password=password,
                                   port=port)
        module.debug('ipmi instantiated - name: "%s"' % name)
        current = ipmi_cmd.get_bootdev()
        # uefimode may not be supported by the BMC, so use the desired value as the default
        current.setdefault('uefimode', uefiboot)
        if state == 'present' and current != dict(
                bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
            request = dict(bootdev=bootdev,
                           uefiboot=uefiboot,
                           persist=persistent)
        elif state == 'absent' and current['bootdev'] == bootdev:
            request = dict(bootdev='default')
        else:
            module.exit_json(changed=False, **current)

        if module.check_mode:
            response = dict(bootdev=request['bootdev'])
        else:
            response = ipmi_cmd.set_bootdev(**request)

        if 'error' in response:
            module.fail_json(msg=response['error'])

        if 'persist' in request:
            response['persistent'] = request['persist']
        if 'uefiboot' in request:
            response['uefimode'] = request['uefiboot']

        module.exit_json(changed=True, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
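
Both this example and the earlier power example build the same pyghmi session object, so the two operations can share one connection outside of Ansible as well. A hedged sketch using only the calls that appear in these examples, and assuming the usual pyghmi import path:

from pyghmi.ipmi import command

def set_pxe_and_reset(bmc, user, password, port=623):
    # Sketch: point the next boot at the network, then reset the host,
    # reusing a single IPMI session for both calls.
    ipmi = command.Command(bmc=bmc, userid=user, password=password, port=port)
    boot_response = ipmi.set_bootdev(bootdev='network', persist=False)
    if 'error' in boot_response:
        raise RuntimeError(boot_response['error'])
    return ipmi.set_power('reset', wait=300)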
Ejemplo n.º 18
0
def main():
    # The following example playbooks:
    #
    # - cron: name="check dirs" hour="5,2" job="ls -alh > /dev/null"
    #
    # - name: do the job
    #   cron: name="do the job" hour="5,2" job="/some/dir/job.sh"
    #
    # - name: no job
    #   cron: name="an old job" state=absent
    #
    # - name: sets env
    #   cron: name="PATH" env=yes value="/bin:/usr/bin"
    #
    # Would produce:
    # PATH=/bin:/usr/bin
    # # Ansible: check dirs
    # * 5,2 * * * ls -alh > /dev/null
    # # Ansible: do the job
    # * 5,2 * * * /some/dir/job.sh

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            user=dict(type='str'),
            job=dict(type='str', aliases=['value']),
            cron_file=dict(type='str'),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            backup=dict(type='bool', default=False),
            minute=dict(type='str', default='*'),
            hour=dict(type='str', default='*'),
            day=dict(type='str', default='*', aliases=['dom']),
            month=dict(type='str', default='*'),
            weekday=dict(type='str', default='*', aliases=['dow']),
            reboot=dict(type='bool', default=False),
            special_time=dict(type='str', choices=["reboot", "yearly", "annually", "monthly", "weekly", "daily", "hourly"]),
            disabled=dict(type='bool', default=False),
            env=dict(type='bool'),
            insertafter=dict(type='str'),
            insertbefore=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['reboot', 'special_time'],
            ['insertafter', 'insertbefore'],
        ],
    )

    name = module.params['name']
    user = module.params['user']
    job = module.params['job']
    cron_file = module.params['cron_file']
    state = module.params['state']
    backup = module.params['backup']
    minute = module.params['minute']
    hour = module.params['hour']
    day = module.params['day']
    month = module.params['month']
    weekday = module.params['weekday']
    reboot = module.params['reboot']
    special_time = module.params['special_time']
    disabled = module.params['disabled']
    env = module.params['env']
    insertafter = module.params['insertafter']
    insertbefore = module.params['insertbefore']
    do_install = state == 'present'

    changed = False
    res_args = dict()
    warnings = list()

    if cron_file:
        cron_file_basename = os.path.basename(cron_file)
        if not re.search(r'^[A-Z0-9_-]+$', cron_file_basename, re.I):
            warnings.append('Filename portion of cron_file ("%s") should consist' % cron_file_basename +
                            ' solely of upper- and lower-case letters, digits, underscores, and hyphens')

    # Ensure all files generated are only writable by the owning user.  Primarily relevant for the cron_file option.
    os.umask(int('022', 8))
    crontab = CronTab(module, user, cron_file)

    module.debug('cron instantiated - name: "%s"' % name)

    if module._diff:
        diff = dict()
        diff['before'] = crontab.existing
        if crontab.cron_file:
            diff['before_header'] = crontab.cron_file
        else:
            if crontab.user:
                diff['before_header'] = 'crontab for user "%s"' % crontab.user
            else:
                diff['before_header'] = 'crontab'

    # --- user input validation ---

    if (special_time or reboot) and \
       (True in [(x != '*') for x in [minute, hour, day, month, weekday]]):
        module.fail_json(msg="You must specify time and date fields or special time.")

    # cannot support special_time on solaris
    if (special_time or reboot) and get_platform() == 'SunOS':
        module.fail_json(msg="Solaris does not support special_time=... or @reboot")

    if cron_file and do_install:
        if not user:
            module.fail_json(msg="To use cron_file=... parameter you must specify user=... as well")

    if job is None and do_install:
        module.fail_json(msg="You must specify 'job' to install a new cron job or variable")

    if (insertafter or insertbefore) and not env and do_install:
        module.fail_json(msg="Insertafter and insertbefore parameters are valid only with env=yes")

    if reboot:
        special_time = "reboot"

    # if requested make a backup before making a change
    if backup and not module.check_mode:
        (backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
        crontab.write(backup_file)

    if crontab.cron_file and not name and not do_install:
        if module._diff:
            diff['after'] = ''
            diff['after_header'] = '/dev/null'
        else:
            diff = dict()
        if module.check_mode:
            changed = os.path.isfile(crontab.cron_file)
        else:
            changed = crontab.remove_job_file()
        module.exit_json(changed=changed, cron_file=cron_file, state=state, diff=diff)

    if env:
        if ' ' in name:
            module.fail_json(msg="Invalid name for environment variable")
        decl = '%s="%s"' % (name, job)
        old_decl = crontab.find_env(name)

        if do_install:
            if len(old_decl) == 0:
                crontab.add_env(decl, insertafter, insertbefore)
                changed = True
            if len(old_decl) > 0 and old_decl[1] != decl:
                crontab.update_env(name, decl)
                changed = True
        else:
            if len(old_decl) > 0:
                crontab.remove_env(name)
                changed = True
    else:
        if do_install:
            for char in ['\r', '\n']:
                if char in job.strip('\r\n'):
                    warnings.append('Job should not contain line breaks')
                    break

            job = crontab.get_cron_job(minute, hour, day, month, weekday, job, special_time, disabled)
            old_job = crontab.find_job(name, job)

            if len(old_job) == 0:
                crontab.add_job(name, job)
                changed = True
            if len(old_job) > 0 and old_job[1] != job:
                crontab.update_job(name, job)
                changed = True
            if len(old_job) > 2:
                crontab.update_job(name, job)
                changed = True
        else:
            old_job = crontab.find_job(name)

            if len(old_job) > 0:
                crontab.remove_job(name)
                changed = True

    # no changes to env/job, but existing crontab needs a terminating newline
    if not changed and crontab.existing != '':
        if not (crontab.existing.endswith('\r') or crontab.existing.endswith('\n')):
            changed = True

    res_args = dict(
        jobs=crontab.get_jobnames(),
        envs=crontab.get_envnames(),
        warnings=warnings,
        changed=changed
    )

    if changed:
        if not module.check_mode:
            crontab.write()
        if module._diff:
            diff['after'] = crontab.render()
            if crontab.cron_file:
                diff['after_header'] = crontab.cron_file
            else:
                if crontab.user:
                    diff['after_header'] = 'crontab for user "%s"' % crontab.user
                else:
                    diff['after_header'] = 'crontab'

            res_args['diff'] = diff

    # retain the backup only if crontab or cron file have changed
    if backup and not module.check_mode:
        if changed:
            res_args['backup_file'] = backup_file
        else:
            os.unlink(backup_file)

    if cron_file:
        res_args['cron_file'] = cron_file

    module.exit_json(**res_args)

    # --- should never get here
    module.exit_json(msg="Unable to execute cron task.")
Ejemplo n.º 19
0
def main():
    module = AnsibleModule(
        argument_spec=dict(servers=dict(required=True, type='list'),
                           realm=dict(required=True),
                           hostname=dict(required=True),
                           debug=dict(required=False,
                                      type='bool',
                                      default="false")),
        supports_check_mode=True,
    )

    module._ansible_debug = True
    realm = module.params.get('realm')
    hostname = module.params.get('hostname')
    servers = module.params.get('servers')
    debug = module.params.get('debug')

    fstore = sysrestore.FileStore(paths.IPA_CLIENT_SYSRESTORE)
    statestore = sysrestore.StateFile(paths.IPA_CLIENT_SYSRESTORE)
    host_principal = 'host/%s@%s' % (hostname, realm)
    os.environ['KRB5CCNAME'] = paths.IPA_DNS_CCACHE

    ca_certs = x509.load_certificate_list_from_file(paths.IPA_CA_CRT)
    if NUM_VERSION >= 40500 and NUM_VERSION < 40590:
        ca_certs = [
            cert.public_bytes(serialization.Encoding.DER) for cert in ca_certs
        ]
    elif NUM_VERSION < 40500:
        ca_certs = [cert.der_data for cert in ca_certs]

    with certdb.NSSDatabase() as tmp_db:
        api.bootstrap(context='cli_installer',
                      confdir=paths.ETC_IPA,
                      debug=debug,
                      delegate=False,
                      nss_dir=tmp_db.secdir)

        if 'config_loaded' not in api.env:
            module.fail_json(msg="Failed to initialize IPA API.")

        # Clear out any current session keyring information
        try:
            delete_persistent_client_session_data(host_principal)
        except ValueError:
            pass

        # Add CA certs to a temporary NSS database
        argspec = inspect.getargspec(tmp_db.create_db)
        try:
            if NUM_VERSION > 40400:
                tmp_db.create_db()

                for i, cert in enumerate(ca_certs):
                    tmp_db.add_cert(cert, 'CA certificate %d' % (i + 1),
                                    certdb.EXTERNAL_CA_TRUST_FLAGS)
            else:
                pwd_file = write_tmp_file(ipa_generate_password())
                tmp_db.create_db(pwd_file.name)

                for i, cert in enumerate(ca_certs):
                    tmp_db.add_cert(cert, 'CA certificate %d' % (i + 1), 'C,,')
        except CalledProcessError as e:
            module.fail_json(msg="Failed to add CA to temporary NSS database.")

        api.finalize()

        # Now, let's try to connect to the server's RPC interface
        connected = False
        try:
            api.Backend.rpcclient.connect()
            connected = True
            module.debug("Try RPC connection")
            api.Backend.rpcclient.forward('ping')
        except errors.KerberosError as e:
            if connected:
                api.Backend.rpcclient.disconnect()
            module.log(
                "Cannot connect to the server due to Kerberos error: %s. "
                "Trying with delegate=True" % e)
            try:
                api.Backend.rpcclient.connect(delegate=True)
                module.debug("Try RPC connection")
                api.Backend.rpcclient.forward('ping')

                module.log("Connection with delegate=True successful")

                # The remote server is not capable of Kerberos S4U2Proxy
                # delegation. This features is implemented in IPA server
                # version 2.2 and higher
                module.warn(
                    "Target IPA server has a lower version than the enrolled "
                    "client")
                module.warn(
                    "Some capabilities including the ipa command capability "
                    "may not be available")
            except errors.PublicError as e2:
                module.fail_json(
                    msg="Cannot connect to the IPA server RPC interface: %s" %
                    e2)
        except errors.PublicError as e:
            module.fail_json(
                msg="Cannot connect to the server due to generic error: %s" %
                e)
    # Use the RPC directly so older servers are supported
    try:
        result = api.Backend.rpcclient.forward(
            'ca_is_enabled',
            version=u'2.107',
        )
        ca_enabled = result['result']
    except (errors.CommandError, errors.NetworkError):
        result = api.Backend.rpcclient.forward(
            'env',
            server=True,
            version=u'2.0',
        )
        ca_enabled = result['result']['enable_ra']
    if not ca_enabled:
        disable_ra()

    module.exit_json(changed=True, ca_enabled=ca_enabled)
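
The NUM_VERSION gates above (40400, 40500, 40590) appear to encode an IPA release as major*10000 + minor*100 + micro, so that, for instance, 4.5.0 becomes 40500. Assuming that encoding, a small sketch for reading the comparisons:

def encode_ipa_version(version_string):
    # Sketch (assumed encoding): '4.5.0' -> 40500, matching the checks above.
    major, minor, micro = (int(part) for part in version_string.split('.')[:3])
    return major * 10000 + minor * 100 + micro

# encode_ipa_version('4.4.4') -> 40404   (falls in the NUM_VERSION > 40400 branch)
# encode_ipa_version('4.5.0') -> 40500   (falls in the DER-encoded CA certs branch)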
Ejemplo n.º 20
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            port=dict(default=623, type='int'),
            user=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            state=dict(default='present', choices=['present', 'absent']),
            bootdev=dict(required=True, choices=['network', 'hd', 'safe', 'optical', 'setup', 'default']),
            persistent=dict(default=False, type='bool'),
            uefiboot=dict(default=False, type='bool')
        ),
        supports_check_mode=True,
    )

    if command is None:
        module.fail_json(msg='the python pyghmi module is required')

    name = module.params['name']
    port = module.params['port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    bootdev = module.params['bootdev']
    persistent = module.params['persistent']
    uefiboot = module.params['uefiboot']
    request = dict()

    if state == 'absent' and bootdev == 'default':
        module.fail_json(msg="The bootdev 'default' cannot be used with state 'absent'.")

    # --- run command ---
    try:
        ipmi_cmd = command.Command(
            bmc=name, userid=user, password=password, port=port
        )
        module.debug('ipmi instantiated - name: "%s"' % name)
        current = ipmi_cmd.get_bootdev()
        # uefimode may not be supported by the BMC, so use the desired value as the default
        current.setdefault('uefimode', uefiboot)
        if state == 'present' and current != dict(bootdev=bootdev, persistent=persistent, uefimode=uefiboot):
            request = dict(bootdev=bootdev, uefiboot=uefiboot, persist=persistent)
        elif state == 'absent' and current['bootdev'] == bootdev:
            request = dict(bootdev='default')
        else:
            module.exit_json(changed=False, **current)

        if module.check_mode:
            response = dict(bootdev=request['bootdev'])
        else:
            response = ipmi_cmd.set_bootdev(**request)

        if 'error' in response:
            module.fail_json(msg=response['error'])

        if 'persist' in request:
            response['persistent'] = request['persist']
        if 'uefiboot' in request:
            response['uefimode'] = request['uefiboot']

        module.exit_json(changed=True, **response)
    except Exception as e:
        module.fail_json(msg=str(e))
Ejemplo n.º 21
0
def main():
    global results
    suma_params = {}

    module = AnsibleModule(argument_spec=dict(
        action=dict(required=False,
                    choices=[
                        'download', 'preview', 'list', 'edit', 'unschedule',
                        'delete', 'config', 'default'
                    ],
                    type='str',
                    default='preview'),
        targets=dict(required=False, type='list', elements='str'),
        oslevel=dict(required=False, type='str', default='Latest'),
        lpp_source_name=dict(required=False, type='str'),
        download_dir=dict(required=False, type='path'),
        download_only=dict(required=False, type='bool', default=False),
        extend_fs=dict(required=False, type='bool', default=True),
        task_id=dict(required=False, type='str'),
        sched_time=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        metadata_dir=dict(required=False,
                          type='path',
                          default='/var/adm/ansible/metadata'),
    ),
                           required_if=[
                               ['action', 'edit', ['task_id']],
                               ['action', 'delete', ['task_id']],
                               ['action', 'unschedule', ['task_id']],
                               ['action', 'preview', ['oslevel']],
                               ['action', 'download', ['oslevel']],
                           ],
                           supports_check_mode=True)

    results = dict(
        changed=False,
        msg='',
        stdout='',
        stderr='',
        meta={'messages': []},
        target_list=(),
    )

    module.debug('*** START ***')

    # Get Module params
    action = module.params['action']
    suma_params['action'] = action

    suma_params['LppSource'] = ''
    suma_params['target_clients'] = ()

    # switch action
    if action == 'list':
        suma_params['task_id'] = module.params['task_id']
        suma_list(module, suma_params)

    elif action == 'edit':
        suma_params['task_id'] = module.params['task_id']
        suma_params['sched_time'] = module.params['sched_time']
        suma_edit(module, suma_params)

    elif action == 'unschedule':
        suma_params['task_id'] = module.params['task_id']
        suma_unschedule(module)

    elif action == 'delete':
        suma_params['task_id'] = module.params['task_id']
        suma_delete(module, suma_params)

    elif action == 'config':
        suma_config(module)

    elif action == 'default':
        suma_default(module)

    elif action == 'download' or action == 'preview':
        suma_params['targets'] = module.params['targets']
        suma_params['download_dir'] = module.params['download_dir']
        suma_params['download_only'] = module.params['download_only']
        suma_params['lpp_source_name'] = module.params['lpp_source_name']
        suma_params['extend_fs'] = module.params['extend_fs']
        if module.params['oslevel'].upper() == 'LATEST':
            suma_params['req_oslevel'] = 'Latest'
        else:
            suma_params['req_oslevel'] = module.params['oslevel']
        if module.params['description']:
            suma_params['description'] = module.params['description']
        else:
            suma_params['description'] = "{} request for oslevel {}".format(
                action, suma_params['req_oslevel'])
        suma_params['metadata_dir'] = module.params['metadata_dir']
        suma_download(module, suma_params)

    # Exit
    msg = 'Suma {} completed successfully'.format(action)
    module.log(msg)
    results['msg'] = msg
    results['lpp_source_name'] = suma_params['LppSource']
    results['target_list'] = suma_params['target_clients']
    module.exit_json(**results)
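
The required_if rules in this example make task_id mandatory for edit/delete/unschedule and oslevel mandatory for preview/download. AnsibleModule enforces this itself; the sketch below only restates the rule semantics by hand for readability, it is not AnsibleModule's implementation:

def missing_required_if(params, rules):
    # Sketch: for each [key, value, required] rule, report required options
    # that are unset whenever params[key] equals value.
    missing = []
    for key, value, required in rules:
        if params.get(key) == value:
            missing.extend(opt for opt in required if params.get(opt) is None)
    return missing

# missing_required_if({'action': 'edit'}, [['action', 'edit', ['task_id']]])
# -> ['task_id']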