Example #1
def main():
    module = AnsibleModule(
        argument_spec=dict(
            src=dict(required=True, type="path"),
            dst=dict(required=True, type="path"),
        ),
        add_file_common_args=True,
        supports_check_mode=False
    )

    src = os.path.abspath(os.path.expanduser(module.params['src']))
    dst = os.path.abspath(os.path.expanduser(module.params['dst']))

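    # Only report a change when dst is not already a symlink pointing at src.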
    exists = os.path.lexists(dst)
    changed = not (exists and os.path.islink(dst) and os.readlink(dst) == src)

    if changed:
        try:
            if exists:
                if os.path.isdir(dst):
                    shutil.rmtree(dst, ignore_errors=False)
                else:
                    os.unlink(dst)
            os.symlink(src, dst)
        except Exception as e:
            module.fail_json(msg="operation failed: {}".format(str(e)))

    module.exit_json(changed=changed)
def main():
    module = AnsibleModule(
        argument_spec = dict(
            recipe    = dict(required=True, type='str'),
            username  = dict(required=True, type='str'),
            password  = dict(required=True, type='str'),
            hostname  = dict(default="127.0.0.1", type="str"),
            port      = dict(default=80, type="int")
        )
    )

    xos_auth = (module.params['username'], module.params['password'])

    url = "http://%s:%d/api/utility/tosca/run/" % (module.params['hostname'], module.params['port'])
    
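    # Submit the TOSCA recipe to the XOS utility API using HTTP basic auth.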
    r = requests.post(url, data={"recipe": module.params['recipe']}, auth=xos_auth)
    if r.status_code != 200:
        try:
            error_text = r.json()["error_text"]
        except Exception:
            error_text = "error while formatting the error: " + traceback.format_exc()
        module.fail_json(msg=error_text, rc=r.status_code)

    result = r.json()
    if "log_msgs" in result:
        module.exit_json(changed=True, msg="\n".join(result["log_msgs"])+"\n")
    else:
        module.exit_json(changed=True, msg="success")
def main():

    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                         ip_address=dict(required=True, type='str'),
                         subnet_mask=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmk_name = module.params['vmk_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        host_system = list(host.keys())[0]
        changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
Example #4
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        name_match=dict(type='str', choices=['first', 'last'], default='first'),
        uuid=dict(type='str'),
        folder=dict(type='str', default='/vm'),
        datacenter=dict(type='str', required=True),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uuid']])

    # FindByInventoryPath() does not require an absolute path
    # so we should leave the input folder path unmodified
    module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['name'],
                   folder=module.params['folder'],
                   uuid=module.params['uuid'])

    # VM already exists
    if vm:
        try:
            module.exit_json(instance=pyv.gather_facts(vm))
        except Exception as exc:
            module.fail_json(msg="Fact gather failed with exception %s" % to_text(exc))
    else:
        msg = "Unable to gather facts for non-existing VM "
        if module.params['name']:
            msg += "%(name)s" % module.params
        elif module.params['uuid']:
            msg += "%(uuid)s" % module.params
        module.fail_json(msg=msg)
Example #5
def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            public_key=dict(),
            state=dict(default='present', choices=['absent', 'present']),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    name = module.params.get('name')
    public_key = module.params.get('public_key')
    state = module.params.get('state')

    setup_rax_module(module, pyrax)

    rax_keypair(module, name, public_key, state)
def main():

    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(target_id=dict(required=True, type='int')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    content = connect_to_api(module)
    host = find_hostsystem(content)

    target_lun_uuid = {}
    scsilun_canonical = {}

    # Associate the scsiLun key with the canonicalName (NAA)
    for scsilun in host.config.storageDevice.scsiLun:
        scsilun_canonical[scsilun.key] = scsilun.canonicalName

    # Associate target number with LUN uuid
    for target in host.config.storageDevice.scsiTopology.adapter[0].target:
        for lun in target.lun:
            target_lun_uuid[target.target] = lun.scsiLun

    module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]])
Example #7
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            name=dict(required=True),
            repo=dict(default='https://cran.rstudio.com/')
        )
    )
    state = module.params['state']
    name = module.params['name']
    changed = False
    version = get_installed_version(module)

    if state == 'present' and not version:
        stderr = install(module)
        version = get_installed_version(module)
        if not version:
            module.fail_json(
                msg='Failed to install {name}: {err}'.format(
                    name=name, err=stderr))
        changed = True

    elif state == 'absent' and version:
        stderr = uninstall(module)
        version = get_installed_version(module)
        if version:
            module.fail_json(
                msg='Failed to uninstall {name}: {err}'.format(
                    name=name, err=stderr))
        changed = True

    module.exit_json(changed=changed, name=name, version=version)
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
            asg_name=dict(required=True, type='str'),
            scaling_adjustment=dict(type='int'),
            min_adjustment_step=dict(type='int'),
            cooldown=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    state = module.params.get('state')

    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    if state == 'present':
        create_scaling_policy(connection, module)
    elif state == 'absent':
        delete_scaling_policy(connection, module)
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            load_balancer_arn=dict(type='str'),
            target_group_arns=dict(type='list'),
            names=dict(type='list')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['load_balancer_arn', 'target_group_arns', 'names']],
                           supports_check_mode=True
                           )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    list_target_groups(connection, module)
def main():
    module = AnsibleModule(
        argument_spec={
            "gitlab_url": {"required": True, type: "bytes"},
            "gitlab_project": {"required": False, "default": None, type: "bytes"},
            "gitlab_token": {"required": True, type: "bytes"},
            "variables": {"required": True, "type": "dict"},
        },
        supports_check_mode=True
    )

    if not HAS_GITLABBUILDVARIABLES:
        module.fail_json(msg="gitlabbuildvariables is required for this module")

    gitlab_config = GitLabConfig(module.params["gitlab_url"], module.params["gitlab_token"])
    gitlab_project = module.params["gitlab_project"]
    project_updater_builder = DictBasedProjectVariablesUpdaterBuilder({"variables": module.params["variables"]})

    updater = project_updater_builder.build(project=gitlab_project, groups=["variables"],
                                            gitlab_config=gitlab_config)

    update_required = updater.update_required()
    if module.check_mode:
        module.exit_json(changed=update_required)
    else:
        if not update_required:
            module.exit_json(changed=False, message="Gitlab build variables already set")
        else:
            updater.update()
            module.exit_json(changed=True, message="Gitlab build variables updated successfully")
Example #11
def main():
    """main entry point for module execution
    """
    argument_spec = dict(
        commands=dict(type='list', required=True),

        wait_for=dict(type='list', aliases=['waitfor']),
        match=dict(default='all', choices=['all', 'any']),

        retries=dict(default=10, type='int'),
        interval=dict(default=1, type='int')
    )

    argument_spec.update(ios_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = {'changed': False}

    warnings = list()
    check_args(module, warnings)
    commands = parse_commands(module, warnings)
    result['warnings'] = warnings

    wait_for = module.params['wait_for'] or list()
    conditionals = [Conditional(c) for c in wait_for]

    retries = module.params['retries']
    interval = module.params['interval']
    match = module.params['match']

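    # Re-run the commands until every wait_for conditional is satisfied
    # (or any single one when match=any), or until retries are exhausted.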
    while retries > 0:
        responses = run_commands(module, commands)

        for item in list(conditionals):
            if item(responses):
                if match == 'any':
                    conditionals = list()
                    break
                conditionals.remove(item)

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        msg = 'One or more conditional statements have not been satisfied'
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update({
        'changed': False,
        'stdout': responses,
        'stdout_lines': list(to_lines(responses))
    })

    module.exit_json(**result)
Example #12
def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str'),
            count=dict(default=1, type='int'),
            state=dict(choices=['present', 'absent']),
        ),
    )

    try:
        d = Dummy()
        execute_output = d.execute(module)

        json_output = {}
        host = execute_output.get('host')
        changed = execute_output.get('changed')
        if host or changed is not None:
            json_output['changed'] = True
            json_output.update(execute_output)
        else:
            json_output['changed'] = False

        module.exit_json(**json_output)
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
            details=dict(required=True, type='dict')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    state = module.params.get('state')

    label = module.params.get('label')
    notification_type = module.params.get('notification_type')
    details = module.params.get('details')

    setup_rax_module(module, pyrax)

    notification(module, state, label, notification_type, details)
Example #14
def main():

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=False),
            apikey=dict(required=True),
            apiid=dict(required=True),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    apikey = module.params['apikey']
    apiid = module.params['apiid']

    if state == "present":
        (rc, result) = create_meter(module, name, apiid, apikey)

    if state == "absent":
        (rc, result) = delete_meter(module, name, apiid, apikey)

    if rc != 0:
        module.fail_json(msg=result)

    module.exit_json(status=result, changed=True)
def main():
    Module = AnsibleModule(
        argument_spec=dict(
            env_id=dict(required=False, type='str'),
            name=dict(required=True, type='str'),
            release=dict(required=False, type='str')
        )
    )

    env_id = Module.params['env_id']
    release = Module.params['release']
    repo_name = Module.params['name']

    try:
        if not env_id and not release:
            raise Exception("Either env_id or release must be given")
        if env_id and repo_name:
            remove_from_env(env_id, repo_name)
        if release and repo_name:
            remove_from_release(release, repo_name)

    except Exception as e:
        Module.fail_json(msg="Exception occurred {}".format(e))

    Module.exit_json(changed=True, result=0)
Example #16
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        name=dict(),
        nacl_id=dict(),
        subnets=dict(required=False, type='list', default=list()),
        tags=dict(required=False, type='dict'),
        ingress=dict(required=False, type='list', default=list()),
        egress=dict(required=False, type='list', default=list()),
        state=dict(default='present', choices=['present', 'absent']),
    ),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_one_of=[['name', 'nacl_id']],
                           required_if=[['state', 'present', ['vpc_id']]])

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % str(e))

    invocations = {
        "present": setup_network_acl,
        "absent": remove_network_acl
    }
    (changed, results) = invocations[state](client, module)
    module.exit_json(changed=changed, nacl_id=results)
def main():
    argument_spec = ovirt_full_argument_spec(
        authz_name=dict(required=True, aliases=['domain']),
        user_name=dict(default=None),
        group_name=dict(default=None),
        namespace=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    try:
        connection = create_connection(module.params.pop('auth'))
        permissions_service = _permissions_service(connection, module)
        permissions = []
        for p in permissions_service.list():
            newperm = dict()
            for key, value in p.__dict__.items():
                if value and isinstance(value, sdk.Struct):
                    newperm[key[1:]] = get_link_name(connection, value)
            permissions.append(newperm)

        module.exit_json(
            changed=False,
            ansible_facts=dict(ovirt_permissions=permissions),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=False)
Example #18
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='file', choices=['file', 'directory']),
            path=dict(type='path'),
            prefix=dict(type='str', default='ansible.'),
            suffix=dict(type='str', default=''),
        ),
    )

    try:
        if module.params['state'] == 'file':
            handle, path = mkstemp(
                prefix=module.params['prefix'],
                suffix=module.params['suffix'],
                dir=module.params['path'],
            )
            close(handle)
        elif module.params['state'] == 'directory':
            path = mkdtemp(
                prefix=module.params['prefix'],
                suffix=module.params['suffix'],
                dir=module.params['path'],
            )

        module.exit_json(changed=True, path=path)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=format_exc())
Example #19
def main():
    ''' ansible oc module for secrets '''

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            src=dict(default=None, type='str'),
            content=dict(default=None),
            content_type=dict(default='dict', choices=['dict']),
            key=dict(default='', type='str'),
            value=dict(),
            value_type=dict(default='', type='str'),
            update=dict(default=False, type='bool'),
            append=dict(default=False, type='bool'),
            index=dict(default=None, type='int'),
            curr_value=dict(default=None, type='str'),
            curr_value_format=dict(default='yaml',
                                   choices=['yaml', 'json', 'str'],
                                   type='str'),
            backup=dict(default=True, type='bool'),
            separator=dict(default='.', type='str'),
        ),
        mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
        required_one_of=[["content", "src"]],
    )

    rval = Yedit.run_ansible(module)
    if 'failed' in rval and rval['failed']:
        module.fail_json(**rval)

    module.exit_json(**rval)
def main():
    module = AnsibleModule(
        argument_spec=dict(
            description=dict(type='str', required=False),
            external=dict(type='bool', required=False),
            gidnumber=dict(type='str', required=False, aliases=['gid']),
            cn=dict(type='str', required=True, aliases=['name']),
            nonposix=dict(type='str', required=False),
            state=dict(type='str', required=False, default='present', choices=['present', 'absent']),
            ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
            ipa_host=dict(type='str', required=False, default='ipa.example.com'),
            ipa_port=dict(type='int', required=False, default=443),
            ipa_user=dict(type='str', required=False, default='admin'),
            ipa_pass=dict(type='str', required=True, no_log=True),
        ),
        supports_check_mode=True,
    )

    client = IPAClient(module=module,
                       host=module.params['ipa_host'],
                       port=module.params['ipa_port'],
                       username=module.params['ipa_user'],
                       password=module.params['ipa_pass'],
                       protocol=module.params['ipa_prot'])
    try:
        client.login()
        changed, group = ensure(module, client)
        module.exit_json(changed=changed, group=group)
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    ''' ansible oc module for secrets '''

    module = AnsibleModule(
        argument_spec=dict(
            query=dict(default='offer', choices=['offer', 'operation']),
            publisher=dict(default='redhat', type='str'),
            debug=dict(default=False, type='bool'),
            tenant_id=dict(default=os.environ.get('AZURE_TENANT_ID'), type='str'),
            client_id=dict(default=os.environ.get('AZURE_CLIENT_ID'), type='str'),
            client_secret=dict(default=os.environ.get('AZURE_CLIENT_SECRET'), type='str'),
            offer=dict(default=None, type='str'),
            operation=dict(default=None, type='str'),
            status=dict(default=None, type='str'),
        ),
    )

    # Verify that we received the required credentials.
    # A valid credential is one that is not None and not ''.
    if (module.params['tenant_id'] is None or module.params['client_id'] is None or
            module.params['client_secret'] is None):
        return module.fail_json(**{'failed': True,
                                   'msg': 'Please specify tenant_id, client_id, and client_secret'})

    rval = AzurePublisher.run_ansible(module.params)
    if int(rval['status_code']) == 404:
        rval['msg'] = 'Offer does not exist.'
    elif int(rval['status_code']) >= 300:
        rval['msg'] = 'Error.'
        return module.fail_json(**rval)

    return module.exit_json(**rval)
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']),
                              name=dict(),
                              amazon_asn=dict(),
                              virtual_gateway_id=dict(),
                              direct_connect_gateway_id=dict(),
                              wait_timeout=dict(type='int', default=320)))
    required_if = [('state', 'present', ['name', 'amazon_asn']),
                   ('state', 'absent', ['direct_connect_gateway_id'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module')

    state = module.params.get('state')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='directconnect', region=region, endpoint=ec2_url, **aws_connect_kwargs)

    if state == 'present':
        (changed, results) = ensure_present(client, module)
    elif state == 'absent':
        changed = ensure_absent(client, module)
        results = {}

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(results))
def main():
    argument_spec = ovirt_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    try:
        connection = create_connection(module.params.pop('auth'))
        vms_service = connection.system_service().vms_service()
        vms = vms_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_vms=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=1,
                        attributes=['name', 'description'],
                    ) for c in vms
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=False)
Example #24
def _main():
    """
    Entrypoint.
    """
    module = AnsibleModule(
        argument_spec={
            'name': {'required': True, 'type': 'str'},
            'version': {'default': None, 'required': False, 'type': 'str'},
            'state': {
                'default': 'present',
                'required': False,
                'choices': ['present', 'absent', 'latest']
            },
            'channels': {'default': None, 'required': False},
            'executable': {'default': None, 'required': False},
            'extra_args': {'default': None, 'required': False, 'type': 'str'}
        },
        supports_check_mode=True)

    conda = find_conda(module.params['executable'])
    name = module.params['name']
    state = module.params['state']
    version = module.params['version']

    if state == 'latest' and version is not None:
        module.fail_json(msg='`version` must not be set if `state == "latest"` (`latest` upgrades to newest version)')

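    # Bind the module so conda commands can be executed through a simple callback.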
    def command_runner(command):
        return _run_conda_command(module, command)

    run_package_operation(
        conda, name, version, state, module.check_mode, command_runner, module.fail_json, module.exit_json)
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        uuid=dict(type='str'),
        folder=dict(type='str'),
        datacenter=dict(required=True, type='str'),
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_together=[['name', 'folder']],
                           required_one_of=[['name', 'uuid']],
                           )

    if module.params['folder']:
        # FindByInventoryPath() does not require an absolute path
        # so we should leave the input folder path unmodified
        module.params['folder'] = module.params['folder'].rstrip('/')

    pyv = PyVmomiHelper(module)
    # Check if the VM exists before continuing
    vm = pyv.get_vm()

    if not vm:
        # If UUID is set, getvm select UUID, show error message accordingly.
        module.fail_json(msg="Unable to gather facts about snapshots for"
                             " non-existing VM ['%s']" % (module.params.get('uuid') or
                                                          module.params.get('name')))

    results = dict(changed=False, guest_snapshots=pyv.gather_guest_snapshot_facts(vm_obj=vm))
    module.exit_json(**results)
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter=dict(required=True, type='str'),
                              cluster=dict(required=True, type='str'),
                              resource_pool=dict(required=True, type='str'),
                              mem_shares=dict(type='str', default="normal", choices=[
                                              'high', 'custom', 'normal', 'low']),
                              mem_limit=dict(type='int', default=-1),
                              mem_reservation=dict(type='int', default=0),
                              mem_expandable_reservations=dict(
                                  type='bool', default=True),
                              cpu_shares=dict(type='str', default="normal", choices=[
                                              'high', 'custom', 'normal', 'low']),
                              cpu_limit=dict(type='int', default=-1),
                              cpu_reservation=dict(type='int', default=0),
                              cpu_expandable_reservations=dict(
                                  type='bool', default=True),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_rp = VMwareResourcePool(module)
    vmware_rp.process_state()
def main():
    argument_spec = ovirt_facts_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        users_service = connection.system_service().users_service()
        users = users_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_users=[
                    get_dict_of_struct(
                        struct=c,
                        connection=connection,
                        fetch_nested=module.params.get('fetch_nested'),
                        attributes=module.params.get('nested_attributes'),
                    ) for c in users
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
def main():
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        name=dict(type='str'),
        datacenter=dict(type='str'),
        cluster=dict(type='str')
    )
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[
                               ['cluster', 'datacenter'],
                           ],
                           )
    result = dict(changed=False)

    pyv = PyVmomiHelper(module)

    if module.params['cluster']:
        dxs = pyv.lookup_datastore_by_cluster()
    else:
        dxs = pyv.lookup_datastore()

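    # Flatten each datastore's summary into a plain dict of facts.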
    datastores = list()
    for ds in dxs:
        summary = ds.summary
        dds = dict()
        dds['accessible'] = summary.accessible
        dds['capacity'] = summary.capacity
        dds['name'] = summary.name
        dds['freeSpace'] = summary.freeSpace
        dds['maintenanceMode'] = summary.maintenanceMode
        dds['multipleHostAccess'] = summary.multipleHostAccess
        dds['type'] = summary.type
        # vcsim does not return uncommitted
        if not summary.uncommitted:
            summary.uncommitted = 0
        dds['uncommitted'] = summary.uncommitted
        dds['url'] = summary.url
        # Calculated values
        dds['provisioned'] = summary.capacity - summary.freeSpace + summary.uncommitted
        dds['datastore_cluster'] = 'N/A'
        if isinstance(ds.parent, vim.StoragePod):
            dds['datastore_cluster'] = ds.parent.name

        if module.params['name']:
            if dds['name'] == module.params['name']:
                datastores.extend([dds])
        else:
            datastores.extend([dds])

    result['datastores'] = datastores

    # found a datastore
    if datastores:
        module.exit_json(**result)
    else:
        msg = "Unable to gather datastore facts"
        if module.params['name']:
            msg += " for %(name)s" % module.params
        msg += " in datacenter %(datacenter)s" % module.params
        module.fail_json(msg=msg)
Example #29
def main():

    module = AnsibleModule(
        argument_spec=dict(
            nsc_host=dict(type='str', required=True),
            nsc_protocol=dict(type='str', default='https'),
            user=dict(type='str', required=True),
            password=dict(type='str', required=True, no_log=True),
            action=dict(type='str', default='enable', choices=['disable', 'enable']),
            name=dict(type='str', default=socket.gethostname()),
            type=dict(type='str', default='server', choices=['server', 'service']),
            validate_certs=dict(type='bool', default=True),
        ),
    )

    rc = 0
    try:
        rc, result = core(module)
    except Exception as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())

    if rc != 0:
        module.fail_json(rc=rc, msg=result)
    else:
        result['changed'] = True
        module.exit_json(**result)
Example #30
def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            managed_policy=dict(default=[], type='list'),
            state=dict(choices=['present', 'absent'], required=True),
            purge_policy=dict(default=False, type='bool')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)

    state = module.params.get("state")

    if state == 'present':
        create_or_update_user(connection, module)
    else:
        destroy_user(connection, module)
Example #31
def main():
    argument_spec = ansible.module_utils.ec2.ec2_argument_spec()
    argument_spec.update(
        dict(
            stack_name=dict(required=True),
            template_parameters=dict(required=False, type='dict', default={}),
            state=dict(default='present', choices=['present', 'absent']),
            template=dict(default=None, required=False, type='path'),
            notification_arns=dict(default=None, required=False),
            stack_policy=dict(default=None, required=False),
            disable_rollback=dict(default=False, type='bool'),
            create_timeout=dict(default=None, type='int'),
            template_url=dict(default=None, required=False),
            template_body=dict(default=None, required=False),
            template_format=dict(default=None,
                                 choices=['json', 'yaml'],
                                 required=False),
            create_changeset=dict(default=False, type='bool'),
            changeset_name=dict(default=None, required=False),
            role_arn=dict(default=None, required=False),
            tags=dict(default=None, type='dict'),
            termination_protection=dict(default=None, type='bool'),
            events_limit=dict(default=200, type='int'),
        ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['template_url', 'template', 'template_body']],
        supports_check_mode=True)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required for this module')

    # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around.
    stack_params = {
        'Capabilities': ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM'],
        'ClientRequestToken': to_native(uuid.uuid4()),
    }
    state = module.params['state']
    stack_params['StackName'] = module.params['stack_name']

    if module.params['template'] is not None:
        with open(module.params['template'], 'r') as template_fh:
            stack_params['TemplateBody'] = template_fh.read()
    elif module.params['template_body'] is not None:
        stack_params['TemplateBody'] = module.params['template_body']
    elif module.params['template_url'] is not None:
        stack_params['TemplateURL'] = module.params['template_url']

    if module.params.get('notification_arns'):
        stack_params['NotificationARNs'] = module.params['notification_arns'].split(',')
    else:
        stack_params['NotificationARNs'] = []

    # can't check the policy when verifying.
    if (module.params['stack_policy'] is not None and not module.check_mode
            and not module.params['create_changeset']):
        with open(module.params['stack_policy'], 'r') as policy_fh:
            stack_params['StackPolicyBody'] = policy_fh.read()

    template_parameters = module.params['template_parameters']

    stack_params['Parameters'] = []
    for k, v in template_parameters.items():
        if isinstance(v, dict):
            # set parameter based on a dict to allow additional CFN Parameter Attributes
            param = dict(ParameterKey=k)

            if 'value' in v:
                param['ParameterValue'] = str(v['value'])

            if 'use_previous_value' in v and bool(v['use_previous_value']):
                param['UsePreviousValue'] = True
                param.pop('ParameterValue', None)

            stack_params['Parameters'].append(param)
        else:
            # allow default k/v configuration to set a template parameter
            stack_params['Parameters'].append({
                'ParameterKey': k,
                'ParameterValue': str(v)
            })

    if isinstance(module.params.get('tags'), dict):
        stack_params['Tags'] = ansible.module_utils.ec2.ansible_dict_to_boto3_tag_list(
            module.params['tags'])

    if module.params.get('role_arn'):
        stack_params['RoleARN'] = module.params['role_arn']

    result = {}

    try:
        region, ec2_url, aws_connect_kwargs = ansible.module_utils.ec2.get_aws_connection_info(
            module, boto3=True)
        cfn = ansible.module_utils.ec2.boto3_conn(module,
                                                  conn_type='client',
                                                  resource='cloudformation',
                                                  region=region,
                                                  endpoint=ec2_url,
                                                  **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg=boto_exception(e))

    # Wrap the cloudformation client methods that this module uses with
    # automatic backoff / retry for throttling error codes
    backoff_wrapper = AWSRetry.jittered_backoff(retries=10,
                                                delay=3,
                                                max_delay=30)
    cfn.describe_stack_events = backoff_wrapper(cfn.describe_stack_events)
    cfn.create_stack = backoff_wrapper(cfn.create_stack)
    cfn.list_change_sets = backoff_wrapper(cfn.list_change_sets)
    cfn.create_change_set = backoff_wrapper(cfn.create_change_set)
    cfn.update_stack = backoff_wrapper(cfn.update_stack)
    cfn.describe_stacks = backoff_wrapper(cfn.describe_stacks)
    cfn.list_stack_resources = backoff_wrapper(cfn.list_stack_resources)
    cfn.delete_stack = backoff_wrapper(cfn.delete_stack)
    if boto_supports_termination_protection(cfn):
        cfn.update_termination_protection = backoff_wrapper(
            cfn.update_termination_protection)

    stack_info = get_stack_facts(cfn, stack_params['StackName'])

    if module.check_mode:
        if state == 'absent' and stack_info:
            module.exit_json(changed=True,
                             msg='Stack would be deleted',
                             meta=[])
        elif state == 'absent' and not stack_info:
            module.exit_json(changed=False,
                             msg='Stack doesn\'t exist',
                             meta=[])
        elif state == 'present' and not stack_info:
            module.exit_json(changed=True,
                             msg='New stack would be created',
                             meta=[])
        else:
            module.exit_json(**check_mode_changeset(module, stack_params, cfn))

    if state == 'present':
        if not stack_info:
            result = create_stack(module, stack_params, cfn,
                                  module.params.get('events_limit'))
        elif module.params.get('create_changeset'):
            result = create_changeset(module, stack_params, cfn,
                                      module.params.get('events_limit'))
        else:
            if module.params.get('termination_protection') is not None:
                update_termination_protection(
                    module, cfn, stack_params['StackName'],
                    bool(module.params.get('termination_protection')))
            result = update_stack(module, stack_params, cfn,
                                  module.params.get('events_limit'))

        # format the stack output

        stack = get_stack_facts(cfn, stack_params['StackName'])
        if result.get('stack_outputs') is None:
            # always define stack_outputs, but it may be empty
            result['stack_outputs'] = {}
        for output in stack.get('Outputs', []):
            result['stack_outputs'][output['OutputKey']] = output['OutputValue']
        stack_resources = []
        reslist = cfn.list_stack_resources(StackName=stack_params['StackName'])
        for res in reslist.get('StackResourceSummaries', []):
            stack_resources.append({
                "logical_resource_id": res['LogicalResourceId'],
                "physical_resource_id": res.get('PhysicalResourceId', ''),
                "resource_type": res['ResourceType'],
                "last_updated_time": res['LastUpdatedTimestamp'],
                "status": res['ResourceStatus'],
                "status_reason": res.get('ResourceStatusReason')  # can be blank, apparently
            })
        result['stack_resources'] = stack_resources

    elif state == 'absent':
        # absent state is different because of the way delete_stack works.
        # the problem is that it doesn't raise an error if the stack isn't found,
        # so we must describe the stack first

        try:
            stack = get_stack_facts(cfn, stack_params['StackName'])
            if not stack:
                result = {'changed': False, 'output': 'Stack not found.'}
            else:
                if stack_params.get('RoleARN') is None:
                    cfn.delete_stack(StackName=stack_params['StackName'])
                else:
                    cfn.delete_stack(StackName=stack_params['StackName'],
                                     RoleARN=stack_params['RoleARN'])
                result = stack_operation(
                    cfn, stack_params['StackName'], 'DELETE',
                    module.params.get('events_limit'),
                    stack_params.get('ClientRequestToken', None))
        except Exception as err:
            module.fail_json(msg=boto_exception(err),
                             exception=traceback.format_exc())

    if module.params['template_format'] is not None:
        result['warnings'] = [
            ('Argument `template_format` is deprecated '
             'since Ansible 2.3, JSON and YAML templates are now passed '
             'directly to the CloudFormation API.')
        ]
    module.exit_json(**result)
Example #32
def main():
    argument_spec = dict(group=dict(required=True, type='str'),
                         interface=dict(required=True),
                         version=dict(choices=['1', '2'],
                                      default='2',
                                      required=False),
                         priority=dict(type='str', required=False),
                         preempt=dict(type='str',
                                      choices=['disabled', 'enabled'],
                                      required=False),
                         vip=dict(type='str', required=False),
                         auth_type=dict(choices=['text', 'md5'],
                                        required=False),
                         auth_string=dict(type='str', required=False),
                         state=dict(choices=['absent', 'present'],
                                    required=False,
                                    default='present'),
                         include_defaults=dict(default=True),
                         config=dict(),
                         save=dict(type='bool', default=False))

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    interface = module.params['interface'].lower()
    group = module.params['group']
    version = module.params['version']
    state = module.params['state']
    priority = module.params['priority']
    preempt = module.params['preempt']
    vip = module.params['vip']
    auth_type = module.params['auth_type']
    auth_string = module.params['auth_string']

    transport = module.params['transport']

    if state == 'present' and not vip:
        module.fail_json(msg='the "vip" param is required when state=present')

    for param in ['group', 'priority']:
        if module.params[param] is not None:
            validate_params(param, module)

    intf_type = get_interface_type(interface)
    if (intf_type != 'ethernet' and transport == 'cli'):
        if is_default(interface, module) == 'DNE':
            module.fail_json(msg='That interface does not exist yet. Create '
                             'it first.',
                             interface=interface)
        if intf_type == 'loopback':
            module.fail_json(msg="Loopback interfaces don't support HSRP.",
                             interface=interface)

    mode = get_interface_mode(interface, intf_type, module)
    if mode == 'layer2':
        module.fail_json(msg='That interface is a layer2 port.\nMake it '
                         'a layer 3 port first.',
                         interface=interface)

    if auth_type or auth_string:
        if not (auth_type and auth_string):
            module.fail_json(msg='When using auth parameters, you need BOTH '
                             'auth_type AND auth_string.')

    args = dict(group=group,
                version=version,
                priority=priority,
                preempt=preempt,
                vip=vip,
                auth_type=auth_type,
                auth_string=auth_string)

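    # Only the parameters the user actually supplied make up the proposed config.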
    proposed = dict((k, v) for k, v in args.items() if v is not None)

    existing = get_hsrp_group(group, interface, module)

    # This will enforce better practice with md5 and hsrp version.
    if proposed.get('auth_type', None) == 'md5':
        if proposed['version'] == '1':
            module.fail_json(msg="It's recommended to use HSRP v2 "
                             "when auth_type=md5")

    elif not proposed.get('auth_type', None) and existing:
        if (proposed['version'] == '1' and existing['auth_type'] == 'md5'):
            module.fail_json(msg="Existing auth_type is md5. It's recommended "
                             "to use HSRP v2 when using md5")

    changed = False
    end_state = existing
    commands = []
    if state == 'present':
        delta = dict(set(proposed.items()).difference(existing.items()))
        if delta:
            command = get_commands_config_hsrp(delta, interface, args)
            commands.extend(command)

    elif state == 'absent':
        if existing:
            command = get_commands_remove_hsrp(group, interface)
            commands.extend(command)

    if commands:
        if module.check_mode:
            module.exit_json(changed=True, commands=commands)
        else:
            load_config(module, commands)
            if transport == 'cli':
                validate_config(body, vip, module)
            changed = True
            end_state = get_hsrp_group(group, interface, module)
            if 'configure' in commands:
                commands.pop(0)

    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = commands
    results['changed'] = changed
    results['warnings'] = warnings

    module.exit_json(**results)
def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "vpn_ipsec_manualkey_interface": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "addr_type": {"required": False, "type": "str",
                              "choices": ["4", "6"]},
                "auth_alg": {"required": False, "type": "str",
                             "choices": ["null", "md5", "sha1",
                                         "sha256", "sha384", "sha512"]},
                "auth_key": {"required": False, "type": "str"},
                "enc_alg": {"required": False, "type": "str",
                            "choices": ["null", "des"]},
                "enc_key": {"required": False, "type": "str"},
                "interface": {"required": False, "type": "str"},
                "ip_version": {"required": False, "type": "str",
                               "choices": ["4", "6"]},
                "local_gw": {"required": False, "type": "str"},
                "local_gw6": {"required": False, "type": "str"},
                "local_spi": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "remote_gw": {"required": False, "type": "str"},
                "remote_gw6": {"required": False, "type": "str"},
                "remote_spi": {"required": False, "type": "str"}

            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_vpn_ipsec(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
def main():
    argument_spec = dict(
        local_file=dict(type='str'),
        remote_file=dict(type='str'),
        file_system=dict(required=False, default='bootflash:'),
        connect_ssh_port=dict(required=False, type='int', default=22),
        file_pull=dict(type='bool', default=False),
        file_pull_timeout=dict(type='int', default=300),
        local_file_directory=dict(required=False, type='str'),
        remote_scp_server=dict(type='str'),
        remote_scp_server_user=dict(type='str'),
        remote_scp_server_password=dict(no_log=True),
    )

    argument_spec.update(nxos_argument_spec)

    required_if = [("file_pull", True, ["remote_file", "remote_scp_server"]),
                   ("file_pull", False, ["local_file"])]

    required_together = [['remote_scp_server',
                          'remote_scp_server_user',
                          'remote_scp_server_password']]

    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           required_together=required_together,
                           supports_check_mode=True)

    file_pull = module.params['file_pull']

    if file_pull:
        if not HAS_PEXPECT:
            module.fail_json(
                msg='library pexpect is required when file_pull is True but does not appear to be '
                    'installed. It can be installed using `pip install pexpect`'
            )
    else:
        if not HAS_PARAMIKO:
            module.fail_json(
                msg='library paramiko is required when file_pull is False but does not appear to be '
                    'installed. It can be installed using `pip install paramiko`'
            )

        if not HAS_SCP:
            module.fail_json(
                msg='library scp is required when file_pull is False but does not appear to be '
                    'installed. It can be installed using `pip install scp`'
            )
    warnings = list()
    check_args(module, warnings)
    results = dict(changed=False, warnings=warnings)

    local_file = module.params['local_file']
    remote_file = module.params['remote_file']
    file_system = module.params['file_system']
    local_file_directory = module.params['local_file_directory']

    results['transfer_status'] = 'No Transfer'
    results['file_system'] = file_system

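    # file_pull copies a file from a remote SCP server onto the device;
    # otherwise the local file is pushed to the device's file system.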
    if file_pull:
        src = remote_file.split('/')[-1]
        local = local_file or src

        if not module.check_mode:
            copy_file_from_remote(module, local, local_file_directory, file_system=file_system)
            results['transfer_status'] = 'Received'

        results['changed'] = True
        results['remote_file'] = src
        results['local_file'] = local
    else:
        if not local_file_exists(module):
            module.fail_json(msg="Local file {0} not found".format(local_file))

        dest = remote_file or os.path.basename(local_file)
        remote_exists = remote_file_exists(module, dest, file_system=file_system)

        if not remote_exists:
            results['changed'] = True
            file_exists = False
        else:
            file_exists = True

        if not module.check_mode and not file_exists:
            transfer_file_to_device(module, dest)
            results['transfer_status'] = 'Sent'

        results['local_file'] = local_file
        if remote_file is None:
            remote_file = os.path.basename(local_file)
        results['remote_file'] = remote_file

    module.exit_json(**results)
Example #35
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True, aliases=['pkg']),
            question=dict(type='str', aliases=['selection', 'setting']),
            vtype=dict(type='str',
                       choices=[
                           'boolean', 'error', 'multiselect', 'note',
                           'password', 'seen', 'select', 'string', 'text',
                           'title'
                       ]),
            value=dict(type='str', aliases=['answer']),
            unseen=dict(type='bool'),
        ),
        required_together=(['question', 'vtype', 'value'], ),
        supports_check_mode=True,
    )

    # TODO: enable passing array of options and/or debconf file from get-selections dump
    pkg = module.params["name"]
    question = module.params["question"]
    vtype = module.params["vtype"]
    value = module.params["value"]
    unseen = module.params["unseen"]

    prev = get_selections(module, pkg)

    changed = False
    msg = ""

    if question is not None:
        if vtype is None or value is None:
            module.fail_json(
                msg="when supplying a question you must supply a valid vtype and value"
            )

        if question not in prev or prev[question] != value:
            changed = True

    if changed:
        if not module.check_mode:
            rc, msg, e = set_selection(module, pkg, question, vtype, value,
                                       unseen)
            if rc:
                module.fail_json(msg=e)

        curr = {question: value}
        if question in prev:
            prev = {question: prev[question]}
        else:
            prev[question] = ''
        if module._diff:
            after = prev.copy()
            after.update(curr)
            diff_dict = {'before': prev, 'after': after}
        else:
            diff_dict = {}

        module.exit_json(changed=changed,
                         msg=msg,
                         current=curr,
                         previous=prev,
                         diff=diff_dict)

    module.exit_json(changed=changed, msg=msg, current=prev)
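
# A minimal sketch of the get_selections() helper used above, assuming it
# parses `debconf-show <package>` output (illustrative, not necessarily the
# module's verbatim code); it returns the current answers as a
# {question: value} dict.
def get_selections(module, pkg):
    cmd = [module.get_bin_path('debconf-show', True), pkg]
    rc, out, err = module.run_command(cmd)
    if rc != 0:
        module.fail_json(msg=err)

    selections = {}
    for line in out.splitlines():
        # debconf-show prints lines such as "* pkg/question: value";
        # the leading '*' marks questions that have already been seen.
        if ':' not in line:
            continue
        key, value = line.split(':', 1)
        selections[key.strip('*').strip()] = value.strip()
    return selections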
Exemple #36
0
class AnsibleDockerClient(Client):
    def __init__(self,
                 argument_spec=None,
                 supports_check_mode=False,
                 mutually_exclusive=None,
                 required_together=None,
                 required_if=None):

        merged_arg_spec = dict()
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
            self.arg_spec = merged_arg_spec

        mutually_exclusive_params = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if)

        if not HAS_DOCKER_PY:
            self.fail(
                "Failed to import docker-py - %s. Try `pip install docker-py`"
                % HAS_DOCKER_ERROR)

        if LooseVersion(docker_version) < LooseVersion(MIN_DOCKER_VERSION):
            self.fail(
                "Error: docker-py version is %s. Minimum version required is %s."
                % (docker_version, MIN_DOCKER_VERSION))

        self.debug = self.module.params.get('debug')
        self.check_mode = self.module.check_mode
        self._connect_params = self._get_connect_params()

        try:
            super(AnsibleDockerClient, self).__init__(**self._connect_params)
        except APIError as exc:
            self.fail("Docker API error: %s" % exc)
        except Exception as exc:
            self.fail("Error connecting: %s" % exc)

    def log(self, msg, pretty_print=False):
        pass
        # if self.debug:
        #     log_file = open('docker.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
        #         log_file.write(u'\n')
        #     else:
        #         log_file.write(msg + u'\n')

    def fail(self, msg):
        self.module.fail_json(msg=msg)

    @staticmethod
    def _get_value(param_name, param_value, env_variable, default_value):
        if param_value is not None:
            # take module parameter value
            if param_value in BOOLEANS_TRUE:
                return True
            if param_value in BOOLEANS_FALSE:
                return False
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == 'cert_path':
                    return os.path.join(env_value, 'cert.pem')
                if param_name == 'cacert_path':
                    return os.path.join(env_value, 'ca.pem')
                if param_name == 'key_path':
                    return os.path.join(env_value, 'key.pem')
                if env_value in BOOLEANS_TRUE:
                    return True
                if env_value in BOOLEANS_FALSE:
                    return False
                return env_value

        # take the default
        return default_value

    @property
    def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters-> environment variables-> defaults.

        self.log('Getting credentials')

        params = dict()
        for key in DOCKER_COMMON_ARGS:
            params[key] = self.module.params.get(key)

        if self.module.params.get('use_tls'):
            # support use_tls option in docker_image.py. This will be deprecated.
            use_tls = self.module.params.get('use_tls')
            if use_tls == 'encrypt':
                params['tls'] = True
            if use_tls == 'verify':
                params['tls_verify'] = True

        result = dict(
            docker_host=self._get_value('docker_host', params['docker_host'],
                                        'DOCKER_HOST', DEFAULT_DOCKER_HOST),
            tls_hostname=self._get_value('tls_hostname',
                                         params['tls_hostname'],
                                         'DOCKER_TLS_HOSTNAME', 'localhost'),
            api_version=self._get_value('api_version', params['api_version'],
                                        'DOCKER_API_VERSION', 'auto'),
            cacert_path=self._get_value('cacert_path', params['cacert_path'],
                                        'DOCKER_CERT_PATH', None),
            cert_path=self._get_value('cert_path', params['cert_path'],
                                      'DOCKER_CERT_PATH', None),
            key_path=self._get_value('key_path', params['key_path'],
                                     'DOCKER_CERT_PATH', None),
            ssl_version=self._get_value('ssl_version', params['ssl_version'],
                                        'DOCKER_SSL_VERSION', None),
            tls=self._get_value('tls', params['tls'], 'DOCKER_TLS',
                                DEFAULT_TLS),
            tls_verify=self._get_value('tls_verify', params['tls_verify'],
                                       'DOCKER_TLS_VERIFY',
                                       DEFAULT_TLS_VERIFY),
            timeout=self._get_value('timeout', params['timeout'],
                                    'DOCKER_TIMEOUT', DEFAULT_TIMEOUT_SECONDS),
        )

        if result['tls_hostname'] is None:
            # get default machine name from the url
            parsed_url = urlparse(result['docker_host'])
            if ':' in parsed_url.netloc:
                result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
            else:
                result['tls_hostname'] = parsed_url.netloc

        return result

    def _get_tls_config(self, **kwargs):
        self.log("get_tls_config:")
        for key in kwargs:
            self.log("  %s: %s" % (key, kwargs[key]))
        try:
            tls_config = TLSConfig(**kwargs)
            return tls_config
        except TLSParameterError as exc:
            self.fail("TLS config error: %s" % exc)

    def _get_connect_params(self):
        auth = self.auth_params

        self.log("connection params:")
        for key in auth:
            self.log("  %s: %s" % (key, auth[key]))

        if auth['tls'] or auth['tls_verify']:
            auth['docker_host'] = auth['docker_host'].replace(
                'tcp://', 'https://')

        if auth['tls'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and no host verification
            tls_config = self._get_tls_config(client_cert=(auth['cert_path'],
                                                           auth['key_path']),
                                              verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls']:
            # TLS with no certs and no host verification
            tls_config = self._get_tls_config(verify=False,
                                              ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
            # TLS with certs and host verification
            if auth['cacert_path']:
                tls_config = self._get_tls_config(
                    client_cert=(auth['cert_path'], auth['key_path']),
                    ca_cert=auth['cacert_path'],
                    verify=True,
                    assert_hostname=auth['tls_hostname'],
                    ssl_version=auth['ssl_version'])
            else:
                tls_config = self._get_tls_config(
                    client_cert=(auth['cert_path'], auth['key_path']),
                    verify=True,
                    assert_hostname=auth['tls_hostname'],
                    ssl_version=auth['ssl_version'])

            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify'] and auth['cacert_path']:
            # TLS with cacert only
            tls_config = self._get_tls_config(
                ca_cert=auth['cacert_path'],
                assert_hostname=auth['tls_hostname'],
                verify=True,
                ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])

        if auth['tls_verify']:
            # TLS with verify and no certs
            tls_config = self._get_tls_config(
                verify=True,
                assert_hostname=auth['tls_hostname'],
                ssl_version=auth['ssl_version'])
            return dict(base_url=auth['docker_host'],
                        tls=tls_config,
                        version=auth['api_version'],
                        timeout=auth['timeout'])
        # No TLS
        return dict(base_url=auth['docker_host'],
                    version=auth['api_version'],
                    timeout=auth['timeout'])

    def _handle_ssl_error(self, error):
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            msg = "You asked for verification that Docker host name matches %s. The actual hostname is %s. " \
                "Most likely you need to set DOCKER_TLS_HOSTNAME or pass tls_hostname with a value of %s. " \
                "You may also use TLS without verification by setting the tls parameter to true." \
                %  (self.auth_params['tls_hostname'], match.group(1), match.group(1))
            self.fail(msg)
        self.fail("SSL Exception: %s" % (error))

    def get_container(self, name=None):
        '''
        Lookup a container and return the inspection results.
        '''
        if name is None:
            return None

        search_name = name
        if not name.startswith('/'):
            search_name = '/' + name

        result = None
        try:
            for container in self.containers(all=True):
                self.log("testing container: %s" % (container['Names']))
                if isinstance(container['Names'],
                              list) and search_name in container['Names']:
                    result = container
                    break
                if container['Id'].startswith(name):
                    result = container
                    break
                if container['Id'] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:
            self.fail("Error retrieving container list: %s" % exc)

        if result is not None:
            try:
                self.log("Inspecting container Id %s" % result['Id'])
                result = self.inspect_container(container=result['Id'])
                self.log("Completed container inspection")
            except Exception as exc:
                self.fail("Error inspecting container: %s" % exc)

        return result

    def find_image(self, name, tag):
        '''
        Lookup an image and return the inspection results.
        '''
        if not name:
            return None

        self.log("Find image %s:%s" % (name, tag))
        images = self._image_lookup(name, tag)
        if len(images) == 0:
            # With API <= 1.20, images pulled from Docker Hub can be reported as 'docker.io/<name>'
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == 'docker.io':
                # the name does not contain a registry, so let's see if docker.io works
                lookup = "docker.io/%s" % name
                self.log("Check for docker.io image: %s" % lookup)
                images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail("Registry returned more than one result for %s:%s" %
                      (name, tag))

        if len(images) == 1:
            try:
                inspection = self.inspect_image(images[0]['Id'])
            except Exception as exc:
                self.fail("Error inspecting image %s:%s - %s" %
                          (name, tag, str(exc)))
            return inspection

        self.log("Image %s:%s not found." % (name, tag))
        return None

    def _image_lookup(self, name, tag):
        '''
        Including a tag in the name parameter sent to the docker-py images method does not
        work consistently. Instead, get the result set for name and manually check if the tag
        exists.
        '''
        try:
            response = self.images(name=name)
        except Exception as exc:
            self.fail("Error searching for image %s - %s" % (name, str(exc)))
        images = response
        if tag:
            lookup = "%s:%s" % (name, tag)
            images = []
            for image in response:
                tags = image.get('RepoTags')
                if tags and lookup in tags:
                    images = [image]
                    break
        return images

    def pull_image(self, name, tag="latest"):
        '''
        Pull an image
        '''
        self.log("Pulling image %s:%s" % (name, tag))
        alreadyToLatest = False
        try:
            for line in self.pull(name, tag=tag, stream=True, decode=True):
                self.log(line, pretty_print=True)
                if line.get('status'):
                    if line.get('status').startswith(
                            'Status: Image is up to date for'):
                        alreadyToLatest = True
                if line.get('error'):
                    if line.get('errorDetail'):
                        error_detail = line.get('errorDetail')
                        self.fail("Error pulling %s - code: %s message: %s" %
                                  (name, error_detail.get('code'),
                                   error_detail.get('message')))
                    else:
                        self.fail("Error pulling %s - %s" %
                                  (name, line.get('error')))
        except Exception as exc:
            self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))

        return self.find_image(name, tag), alreadyToLatest
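
# Hypothetical usage sketch (the option below is illustrative, not part of the
# shared client): a module built on AnsibleDockerClient only declares its own
# options; DOCKER_COMMON_ARGS, TLS handling and daemon connection setup all
# come from the class above.
def example_docker_module():
    argument_spec = dict(
        name=dict(type='str', required=True),
    )
    client = AnsibleDockerClient(argument_spec=argument_spec,
                                 supports_check_mode=True)
    container = client.get_container(client.module.params['name'])
    client.module.exit_json(changed=False, exists=container is not None)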
class NtpAuth(object):
    """Manage ntp authentication"""

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # ntp_auth configuration info
        self.key_id = self.module.params['key_id']
        self.password = self.module.params['auth_pwd'] or None
        self.auth_mode = self.module.params['auth_mode'] or None
        self.auth_type = self.module.params['auth_type']
        self.trusted_key = self.module.params['trusted_key']
        self.authentication = self.module.params['authentication'] or None
        self.state = self.module.params['state']
        self.check_params()

        self.ntp_auth_conf = dict()
        self.key_id_exist = False
        self.cur_trusted_key = 'disable'

        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = list()
        self.end_state = list()

        self.get_ntp_auth_exist_config()

    def check_params(self):
        """Check all input params"""

        if not self.key_id.isdigit():
            self.module.fail_json(
                msg='Error: key_id is not a digit.')

        if (int(self.key_id) < 1) or (int(self.key_id) > 4294967295):
            self.module.fail_json(
                msg='Error: key_id must be between 1 and 4294967295.')

        if self.state == "present":
            if (self.auth_type == 'encrypt') and\
                    ((len(self.password) < 20) or (len(self.password) > 392)):
                self.module.fail_json(
                    msg='Error: The length of encrypted password is between 20 and 392.')
            elif (self.auth_type == 'text') and\
                    ((len(self.password) < 1) or (len(self.password) > 255)):
                self.module.fail_json(
                    msg='Error: The length of text password is between 1 and 255.')

    def init_module(self):
        """Init module object"""

        required_if = [("state", "present", ("password", "auth_mode"))]
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_if=required_if,
            supports_check_mode=True
        )

    def check_response(self, xml_str, xml_name):
        """Check if response message is already sucjctanner.network_cloudengine.ceed."""

        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_ntp_auth_enable(self):
        """Get ntp authentication enable state"""

        xml_str = CE_NC_GET_NTP_AUTH_ENABLE
        con_obj = get_nc_config(self.module, xml_str)
        if "<data/>" in con_obj:
            return

        # get ntp authentication enable
        auth_en = re.findall(
            r'.*<isAuthEnable>(.*)</isAuthEnable>.*', con_obj)
        if auth_en:
            if auth_en[0] == 'true':
                self.ntp_auth_conf['authentication'] = 'enable'
            else:
                self.ntp_auth_conf['authentication'] = 'disable'

    def get_ntp_all_auth_keyid(self):
        """Get all authentication keyid info"""

        ntp_auth_conf = list()

        xml_str = CE_NC_GET_ALL_NTP_AUTH_CONFIG
        con_obj = get_nc_config(self.module, xml_str)
        if "<data/>" in con_obj:
            self.ntp_auth_conf["authentication-keyid"] = "None"
            return ntp_auth_conf

        # get ntp authentication config
        ntp_auth = re.findall(
            r'.*<keyId>(.*)</keyId>.*\s*<mode>(.*)</mode>.*\s*'
            r'<keyVal>(.*)</keyVal>.*\s*<isReliable>(.*)</isReliable>.*', con_obj)

        for ntp_auth_num in ntp_auth:
            if ntp_auth_num[0] == self.key_id:
                self.key_id_exist = True
                if ntp_auth_num[3] == 'true':
                    self.cur_trusted_key = 'enable'
                else:
                    self.cur_trusted_key = 'disable'

            if ntp_auth_num[3] == 'true':
                trusted_key = 'enable'
            else:
                trusted_key = 'disable'
            ntp_auth_conf.append(dict(key_id=ntp_auth_num[0],
                                      auth_mode=ntp_auth_num[1].lower(),
                                      trusted_key=trusted_key))
        self.ntp_auth_conf["authentication-keyid"] = ntp_auth_conf

        return ntp_auth_conf

    def get_ntp_auth_exist_config(self):
        """Get ntp authentication existed configure"""

        self.get_ntp_auth_enable()
        self.get_ntp_all_auth_keyid()

    def config_ntp_auth_keyid(self):
        """Config ntp authentication keyid"""

        if self.trusted_key == 'enable':
            trusted_key = 'true'
        else:
            trusted_key = 'false'
        xml_str = CE_NC_MERGE_NTP_AUTH_CONFIG % (
            self.key_id, self.auth_mode.upper(), self.password, trusted_key)
        ret_xml = set_nc_config(self.module, xml_str)
        self.check_response(ret_xml, "NTP_AUTH_KEYID_CONFIG")

    def config_ntp_auth_enable(self):
        """Config ntp authentication enable"""

        if self.ntp_auth_conf['authentication'] != self.authentication:
            if self.authentication == 'enable':
                state = 'true'
            else:
                state = 'false'
            xml_str = CE_NC_MERGE_NTP_AUTH_ENABLE % state
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "NTP_AUTH_ENABLE")

    def undo_config_ntp_auth_keyid(self):
        """Undo ntp authentication key-id"""

        xml_str = CE_NC_DELETE_NTP_AUTH_CONFIG % self.key_id
        ret_xml = set_nc_config(self.module, xml_str)
        self.check_response(ret_xml, "UNDO_NTP_AUTH_KEYID_CONFIG")

    def cli_load_config(self, commands):
        """Load config by cli"""

        if not self.module.check_mode:
            load_config(self.module, commands)

    def config_ntp_auth_keyid_by_cli(self):
        """Config ntp authentication keyid bye the way of CLI"""

        commands = list()
        config_cli = "ntp authentication-keyid %s authentication-mode %s %s" % (
            self.key_id, self.auth_mode, self.password)
        commands.append(config_cli)
        self.cli_load_config(commands)

    def config_ntp_auth(self):
        """Config ntp authentication"""

        if self.state == "present":
            if self.auth_type == 'encrypt':
                self.config_ntp_auth_keyid()
            else:
                self.config_ntp_auth_keyid_by_cli()
        else:
            if not self.key_id_exist:
                self.module.fail_json(
                    msg='Error: The Authentication-keyid does not exist.')
            self.undo_config_ntp_auth_keyid()

        if self.authentication:
            self.config_ntp_auth_enable()

        self.changed = True

    def get_existing(self):
        """Get existing info"""

        self.existing = copy.deepcopy(self.ntp_auth_conf)

    def get_proposed(self):
        """Get proposed result"""

        auth_type = self.auth_type
        trusted_key = self.trusted_key
        if self.state == 'absent':
            auth_type = None
            trusted_key = None
        self.proposed = dict(key_id=self.key_id, auth_pwd=self.password,
                             auth_mode=self.auth_mode, auth_type=auth_type,
                             trusted_key=trusted_key, authentication=self.authentication,
                             state=self.state)

    def get_update_cmd(self):
        """Get updated commands"""

        cli_str = ""
        if self.state == "present":
            cli_str = "ntp authentication-keyid %s authentication-mode %s " % (
                self.key_id, self.auth_mode)
            if self.auth_type == 'encrypt':
                cli_str = "%s cipher %s" % (cli_str, self.password)
            else:
                cli_str = "%s %s" % (cli_str, self.password)
        else:
            cli_str = "undo ntp authentication-keyid %s" % self.key_id

        self.updates_cmd.append(cli_str)

        if self.authentication:
            cli_str = ""

            if self.ntp_auth_conf['authentication'] != self.authentication:
                if self.authentication == 'enable':
                    cli_str = "ntp authentication enable"
                else:
                    cli_str = "undo ntp authentication enable"

            if cli_str != "":
                self.updates_cmd.append(cli_str)

        cli_str = ""
        if self.state == "present":
            if self.trusted_key != self.cur_trusted_key:
                if self.trusted_key == 'enable':
                    cli_str = "ntp trusted authentication-keyid %s" % self.key_id
                else:
                    cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id
        else:
            cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id

        if cli_str != "":
            self.updates_cmd.append(cli_str)

    def get_end_state(self):
        """Get end state info"""

        self.ntp_auth_conf = dict()
        self.get_ntp_auth_exist_config()
        self.end_state = copy.deepcopy(self.ntp_auth_conf)

    def show_result(self):
        """Show result"""

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)

    def work(self):
        """Excute task"""

        self.get_existing()
        self.get_proposed()
        self.get_update_cmd()

        self.config_ntp_auth()

        self.get_end_state()
        self.show_result()
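
# A minimal sketch of the main() that typically drives NtpAuth; the argument
# spec below is inferred from the parameters the class reads, and
# ce_argument_spec is assumed to come from the CloudEngine module utilities,
# so treat this as illustrative rather than the module's verbatim code.
def main():
    argument_spec = dict(
        key_id=dict(required=True, type='str'),
        auth_pwd=dict(type='str', no_log=True),
        auth_mode=dict(type='str', choices=['md5', 'hmac-sha256']),
        auth_type=dict(type='str', choices=['text', 'encrypt'], default='encrypt'),
        trusted_key=dict(type='str', choices=['enable', 'disable'], default='disable'),
        authentication=dict(type='str', choices=['enable', 'disable']),
        state=dict(type='str', choices=['present', 'absent'], default='present'),
    )
    argument_spec.update(ce_argument_spec)

    ntp_auth_obj = NtpAuth(argument_spec)
    ntp_auth_obj.work()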
def main():
    argument_spec = aci_argument_spec
    argument_spec.update(
        allow_useg=dict(type='str', choices=['encap', 'useg']),
        ap=dict(type='str', aliases=['app_profile', 'app_profile_name']),
        deploy_immediacy=dict(type='str', choices=['immediate', 'on-demand']),
        domain=dict(type='str', aliases=['domain_name', 'domain_profile']),
        domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']),
        encap=dict(type='int'),
        encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),
        epg=dict(type='str', aliases=['name', 'epg_name']),
        netflow=dict(type='str', choices=['disabled', 'enabled']),
        primary_encap=dict(type='int'),
        resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
        tenant=dict(type='str', aliases=['tenant_name']),
        vm_provider=dict(type='str', choices=['microsoft', 'openstack', 'vmware']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['domain_type', 'vmm', ['vm_provider']],
            ['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
            ['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
        ],
    )

    allow_useg = module.params['allow_useg']
    ap = module.params['ap']
    deploy_immediacy = module.params['deploy_immediacy']
    domain = module.params['domain']
    domain_type = module.params['domain_type']
    vm_provider = module.params['vm_provider']
    encap = module.params['encap']
    if encap is not None:
        if encap in range(1, 4097):
            encap = 'vlan-{0}'.format(encap)
        else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
    encap_mode = module.params['encap_mode']
    epg = module.params['epg']
    netflow = module.params['netflow']
    primary_encap = module.params['primary_encap']
    if primary_encap is not None:
        if primary_encap in range(1, 4097):
            primary_encap = 'vlan-{0}'.format(primary_encap)
        else:
            module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
    resolution_immediacy = module.params['resolution_immediacy']
    state = module.params['state']
    tenant = module.params['tenant']

    if domain_type == 'phys' and vm_provider is not None:
        module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'")

    # Compile the full domain for URL building
    if domain_type == 'vmm':
        epg_domain = '{0}{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
    elif domain_type is not None:
        epg_domain = 'uni/phys-{0}'.format(domain)
    else:
        epg_domain = None

    aci = ACIModule(module)
    aci.construct_url(
        root_class=dict(
            aci_class='fvTenant',
            aci_rn='tn-{0}'.format(tenant),
            filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
            module_object=tenant,
        ),
        subclass_1=dict(
            aci_class='fvAp',
            aci_rn='ap-{0}'.format(ap),
            filter_target='eq(fvAp.name, "{0}")'.format(ap),
            module_object=ap,
        ),
        subclass_2=dict(
            aci_class='fvAEPg',
            aci_rn='epg-{0}'.format(epg),
            filter_target='eq(fvTenant.name, "{0}")'.format(epg),
            module_object=epg,
        ),
        subclass_3=dict(
            aci_class='fvRsDomAtt',
            aci_rn='rsdomAtt-[{0}]'.format(epg_domain),
            filter_target='eq(fvRsDomAtt.tDn, "{0}")'.format(epg_domain),
            module_object=epg_domain,
        ),
    )

    aci.get_existing()

    if state == 'present':
        # Filter out module parameters with null values
        aci.payload(
            aci_class='fvRsDomAtt',
            class_config=dict(
                classPref=allow_useg,
                encap=encap,
                encapMode=encap_mode,
                instrImedcy=deploy_immediacy,
                netflowPref=netflow,
                primaryEncap=primary_encap,
                resImedcy=resolution_immediacy,
            ),
        )

        # Generate config diff which will be used as POST request body
        aci.get_diff(aci_class='fvRsDomAtt')

        # Submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    module.exit_json(**aci.result)
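
# For context: the VM_PROVIDER_MAPPING looked up above is assumed to map the
# vm_provider choice onto the VMM domain DN prefix, roughly:
#
#   VM_PROVIDER_MAPPING = dict(
#       microsoft='uni/vmmp-Microsoft/dom-',
#       openstack='uni/vmmp-OpenStack/dom-',
#       vmware='uni/vmmp-VMware/dom-',
#   )
#
# so a VMware domain named 'dvs1' would yield an rsdomAtt target of
# 'uni/vmmp-VMware/dom-dvs1'.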
def main():
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        zone_type=dict(required=False, choices=['primary', 'secondary']),
        email=dict(required=False, default=None),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        masters=dict(required=False, default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    name = module.params.get('name')
    state = module.params.get('state')
    wait = module.params.get('wait')
    timeout = module.params.get('timeout')

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        zone = cloud.get_zone(name)

        if state == 'present':
            zone_type = module.params.get('zone_type')
            email = module.params.get('email')
            description = module.params.get('description')
            ttl = module.params.get('ttl')
            masters = module.params.get('masters')

            if module.check_mode:
                module.exit_json(changed=_system_state_change(
                    state, email, description, ttl, masters, zone))

            if zone is None:
                zone = cloud.create_zone(name=name,
                                         zone_type=zone_type,
                                         email=email,
                                         description=description,
                                         ttl=ttl,
                                         masters=masters)
                changed = True
            else:
                if masters is None:
                    masters = []

                pre_update_zone = zone
                changed = _system_state_change(state, email, description, ttl,
                                               masters, pre_update_zone)
                if changed:
                    zone = cloud.update_zone(name,
                                             email=email,
                                             description=description,
                                             ttl=ttl,
                                             masters=masters)

            if wait:
                _wait(timeout, cloud, zone, state, module, sdk)

            module.exit_json(changed=changed, zone=zone)

        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(
                    state, None, None, None, None, zone))

            if zone is None:
                changed = False
            else:
                cloud.delete_zone(name)
                changed = True

            if wait:
                _wait(timeout, cloud, zone, state, module, sdk)

            module.exit_json(changed=changed)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
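
# A minimal sketch of the _system_state_change() helper called above (an
# assumption, not the module's verbatim code): the zone object is treated as
# dict-like, and a change is reported when the zone is missing but wanted,
# present but unwanted, or present with attributes that differ from the
# requested values.
def _system_state_change(state, email, description, ttl, masters, zone):
    if state == 'absent':
        return zone is not None
    if zone is None:
        return True
    for attr, wanted in (('email', email), ('description', description),
                         ('ttl', ttl), ('masters', masters)):
        if wanted is not None and zone.get(attr) != wanted:
            return True
    return False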
Exemple #40
0
class NetAppCDOTUserRole(object):
    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=True, choices=['present', 'absent']),
                name=dict(required=True, type='str'),
                command_directory_name=dict(required=True, type='str'),
                access_level=dict(required=False,
                                  type='str',
                                  default='all',
                                  choices=['none', 'readonly', 'all']),
                vserver=dict(required=True, type='str'),
            ))

        self.module = AnsibleModule(argument_spec=self.argument_spec,
                                    supports_check_mode=True)

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']

        self.command_directory_name = p['command_directory_name']
        self.access_level = p['access_level']

        self.vserver = p['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_role(self):
        """
        Checks if the role exists for specific command-directory-name.

        :return:
            True if role found
            False if role is not found
        :rtype: bool
        """

        security_login_role_get_iter = netapp_utils.zapi.NaElement(
            'security-login-role-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-role-info', **{
                'vserver': self.vserver,
                'role-name': self.name,
                'command-directory-name': self.command_directory_name
            })

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_role_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(
                security_login_role_get_iter, enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            # Error 16031 denotes a role not being found.
            if to_native(e.code) == "16031":
                return False
            else:
                self.module.fail_json(msg='Error getting role %s: %s' %
                                      (self.name, to_native(e)),
                                      exception=traceback.format_exc())

        if (result.get_child_by_name('num-records')
                and int(result.get_child_content('num-records')) >= 1):
            return True
        else:
            return False

    def create_role(self):
        role_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-role-create', **{
                'vserver': self.vserver,
                'role-name': self.name,
                'command-directory-name': self.command_directory_name,
                'access-level': self.access_level
            })

        try:
            self.server.invoke_successfully(role_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error creating role %s: %s' %
                                  (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def delete_role(self):
        role_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-role-delete', **{
                'vserver': self.vserver,
                'role-name': self.name,
                'command-directory-name': self.command_directory_name
            })

        try:
            self.server.invoke_successfully(role_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error removing role %s: %s' %
                                  (self.name, to_native(e)),
                                  exception=traceback.format_exc())

    def apply(self):
        changed = False
        role_exists = self.get_role()

        if role_exists:
            if self.state == 'absent':
                changed = True

            # Check if properties need to be updated
        else:
            if self.state == 'present':
                changed = True

        if changed:
            if self.module.check_mode:
                pass
            else:
                if self.state == 'present':
                    if not role_exists:
                        self.create_role()

                    # Update properties

                elif self.state == 'absent':
                    self.delete_role()

        self.module.exit_json(changed=changed)
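
# A minimal driver sketch, following the pattern these NetApp modules usually
# use (an assumption rather than the file's verbatim main()):
def main():
    role = NetAppCDOTUserRole()
    role.apply()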
def main():
    fields = {
        "host": {
            "required": True,
            "type": "str"
        },
        "username": {
            "required": True,
            "type": "str"
        },
        "password": {
            "required": False,
            "type": "str",
            "no_log": True
        },
        "vdom": {
            "required": False,
            "type": "str",
            "default": "root"
        },
        "https": {
            "required": False,
            "type": "bool",
            "default": "False"
        },
        "system_dscp_based_priority": {
            "required": False,
            "type": "dict",
            "options": {
                "state": {
                    "required": True,
                    "type": "str",
                    "choices": ["present", "absent"]
                },
                "ds": {
                    "required": False,
                    "type": "int"
                },
                "id": {
                    "required": True,
                    "type": "int"
                },
                "priority": {
                    "required": False,
                    "type": "str",
                    "choices": ["low", "medium", "high"]
                }
            }
        }
    }

    module = AnsibleModule(argument_spec=fields, supports_check_mode=False)
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_system(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
def main():
    argument_spec = postgres_common_argument_spec()
    argument_spec.update(
        schema=dict(type="str", required=True, aliases=['name']),
        owner=dict(type="str", default=""),
        database=dict(type="str", default="postgres", aliases=["db", "login_db"]),
        cascade_drop=dict(type="bool", default=False),
        state=dict(type="str", default="present", choices=["absent", "present"]),
        session_role=dict(type="str"),
        trust_input=dict(type="bool", default=True),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    schema = module.params["schema"]
    owner = module.params["owner"]
    state = module.params["state"]
    cascade_drop = module.params["cascade_drop"]
    session_role = module.params["session_role"]
    trust_input = module.params["trust_input"]

    if not trust_input:
        # Check input for potentially dangerous elements:
        check_input(module, schema, owner, session_role)

    changed = False

    conn_params = get_conn_params(module, module.params)
    db_connection = connect_to_db(module, conn_params, autocommit=True)
    cursor = db_connection.cursor(cursor_factory=DictCursor)

    try:
        if module.check_mode:
            if state == "absent":
                changed = not schema_exists(cursor, schema)
            elif state == "present":
                changed = not schema_matches(cursor, schema, owner)
            module.exit_json(changed=changed, schema=schema)

        if state == "absent":
            try:
                changed = schema_delete(cursor, schema, cascade_drop)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())

        elif state == "present":
            try:
                changed = schema_create(cursor, schema, owner)
            except SQLParseError as e:
                module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except NotSupportedError as e:
        module.fail_json(msg=to_native(e), exception=traceback.format_exc())
    except SystemExit:
        # Avoid catching this on Python 2.4
        raise
    except Exception as e:
        module.fail_json(msg="Database query failed: %s" % to_native(e), exception=traceback.format_exc())

    db_connection.close()
    module.exit_json(changed=changed, schema=schema, queries=executed_queries)
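
# A minimal sketch of the schema_exists() check used in check mode above,
# assuming a psycopg2 cursor; the query text is illustrative rather than the
# module's verbatim SQL.
def schema_exists(cursor, schema):
    query = ("SELECT schema_name FROM information_schema.schemata "
             "WHERE schema_name = %(schema)s")
    cursor.execute(query, {'schema': schema})
    return cursor.rowcount > 0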
Exemple #43
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            instance_name=dict(type='str'),
            instance_pattern=dict(type='str'),
            tags=dict(type='list', required=True),
            state=dict(type='str',
                       default='present',
                       choices=['absent', 'present']),
            zone=dict(type='str', default='us-central1-a'),
            service_account_email=dict(type='str'),
            pem_file=dict(type='path'),
            project_id=dict(type='str'),
        ),
        mutually_exclusive=[['instance_name', 'instance_pattern']],
        required_one_of=[['instance_name', 'instance_pattern']],
    )

    instance_name = module.params.get('instance_name')
    instance_pattern = module.params.get('instance_pattern')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    changed = False

    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (0.17.0+) required for this module')

    gce = gce_connect(module)

    # Create list of nodes to operate on
    matching_nodes = []
    try:
        if instance_pattern:
            instances = gce.list_nodes(ex_zone=zone)
            # no instances in zone
            if not instances:
                module.exit_json(changed=False,
                                 tags=tags,
                                 zone=zone,
                                 instances_updated=[])
            try:
                # Python regex fully supported: https://docs.python.org/2/library/re.html
                p = re.compile(instance_pattern)
                matching_nodes = [
                    i for i in instances if p.search(i.name) is not None
                ]
            except re.error as e:
                module.fail_json(msg='Regex error for pattern %s: %s' %
                                 (instance_pattern, e),
                                 changed=False)
        else:
            matching_nodes = [gce.ex_get_node(instance_name, zone=zone)]
    except ResourceNotFoundError:
        module.fail_json(msg='Instance %s not found in zone %s' %
                         (instance_name, zone),
                         changed=False)
    except GoogleBaseError as e:
        module.fail_json(msg=str(e),
                         changed=False,
                         exception=traceback.format_exc())

    # Tag nodes
    instance_pattern_matches = []
    tags_changed = []
    for node in matching_nodes:
        changed, tags_changed = modify_tags(gce, module, node, tags, state)
        if changed:
            instance_pattern_matches.append({
                'instance_name': node.name,
                'tags_changed': tags_changed
            })
    if instance_pattern:
        module.exit_json(changed=changed,
                         instance_pattern=instance_pattern,
                         tags=tags_changed,
                         zone=zone,
                         instances_updated=instance_pattern_matches)
    else:
        module.exit_json(changed=changed,
                         instance_name=instance_name,
                         tags=tags_changed,
                         zone=zone)
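
# An illustrative sketch of the modify_tags() helper the loop above relies on
# (an assumption, not the module's verbatim code): it merges or strips the
# requested tags and, when anything changes, pushes the new tag list back
# through libcloud's GCE driver.
def modify_tags(gce, module, node, tags, state):
    existing = list(node.extra.get('tags', []))
    if state == 'present':
        added = [t for t in tags if t not in existing]
        new_tags = existing + added
        changed_tags = added
    else:
        changed_tags = [t for t in tags if t in existing]
        new_tags = [t for t in existing if t not in tags]
    if not changed_tags:
        return False, []
    gce.ex_set_node_tags(node, new_tags)
    return True, changed_tags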
def main():
    os_choices = [
        'eos', 'junos', 'iosxr', 'fortios', 'ios', 'mock', 'nxos', 'panos',
        'vyos', 'ros'
    ]
    module = AnsibleModule(
        argument_spec=dict(
            hostname=dict(type='str', required=False, aliases=['host']),
            username=dict(type='str', required=False),
            password=dict(type='str', required=False, no_log=True),
            provider=dict(type='dict', required=False),
            timeout=dict(type='int', required=False, default=60),
            dev_os=dict(type='str', required=False, choices=os_choices),
            optional_args=dict(required=False, type='dict', default=None),
            args=dict(required=True, type='dict', default=None),
        ),
        supports_check_mode=False)

    if not napalm_found:
        module.fail_json(msg="the python module napalm is required")

    provider = module.params['provider'] or {}

    no_log = ['password', 'secret']
    for param in no_log:
        if provider.get(param):
            module.no_log_values.update(return_values(provider[param]))
        if provider.get('optional_args') and provider['optional_args'].get(
                param):
            module.no_log_values.update(
                return_values(provider['optional_args'].get(param)))
        if module.params.get(
                'optional_args') and module.params['optional_args'].get(param):
            module.no_log_values.update(
                return_values(module.params['optional_args'].get(param)))

    # allow host or hostname
    provider['hostname'] = provider.get('hostname', None) or provider.get(
        'host', None)
    # allow local params to override provider
    for param, pvalue in provider.items():
        if module.params.get(param) is not False:
            module.params[param] = module.params.get(param) or pvalue

    hostname = module.params['hostname']
    username = module.params['username']
    dev_os = module.params['dev_os']
    password = module.params['password']
    timeout = module.params['timeout']
    args = module.params['args']

    argument_check = {
        'hostname': hostname,
        'username': username,
        'dev_os': dev_os,
        'password': password
    }
    for key, val in argument_check.items():
        if val is None:
            module.fail_json(msg=str(key) + " is required")

    # use checks outside of Ansible's own argument checks, since params can come from the provider
    if dev_os not in os_choices:
        module.fail_json(msg="dev_os is not set to " + str(os_choices))

    if module.params['optional_args'] is None:
        optional_args = {}
    else:
        optional_args = module.params['optional_args']

    try:
        network_driver = get_network_driver(dev_os)
        device = network_driver(hostname=hostname,
                                username=username,
                                password=password,
                                timeout=timeout,
                                optional_args=optional_args)
        device.open()
    except Exception as e:
        module.fail_json(msg="cannot connect to device: " + str(e))
def main():
    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True),
            user=dict(required=True),
            password=dict(no_log=True),
            token=dict(no_log=True),
            action=dict(required=True,
                        choices=['latest_release', 'create_release']),
            tag=dict(type='str'),
            target=dict(type='str'),
            name=dict(type='str'),
            body=dict(type='str'),
            draft=dict(type='bool', default=False),
            prerelease=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=(('password', 'token'), ),
        mutually_exclusive=(('password', 'token'), ),
        required_if=[('action', 'create_release', ['tag'])],
    )

    if not HAS_GITHUB_API:
        module.fail_json(msg='Missing required github3 module (check docs or '
                         'install with: pip install github3.py==1.0.0a4)')

    repo = module.params['repo']
    user = module.params['user']
    password = module.params['password']
    login_token = module.params['token']
    action = module.params['action']
    tag = module.params.get('tag')
    target = module.params.get('target')
    name = module.params.get('name')
    body = module.params.get('body')
    draft = module.params.get('draft')
    prerelease = module.params.get('prerelease')

    # login to github
    try:
        if user and password:
            gh_obj = github3.login(user, password=password)
        elif login_token:
            gh_obj = github3.login(token=login_token)

        # test if we're actually logged in
        gh_obj.me()
    except github3.AuthenticationFailed:
        e = get_exception()
        module.fail_json(msg='Failed to connect to GitHub: %s' % e,
                         details="Please check username and password or token "
                         "for repository %s" % repo)

    repository = get_repository(gh_obj, user, repo)

    if not repository:
        module.fail_json(msg="Repository %s/%s doesn't exist" % (user, repo))

    if action == 'latest_release':
        release = repository.latest_release()
        if release:
            module.exit_json(tag=release.tag_name)
        else:
            module.exit_json(tag=None)

    if action == 'create_release':
        release_exists = repository.release_from_tag(tag)
        if release_exists:
            module.exit_json(skipped=True,
                             msg="Release for tag %s already exists." % tag)

        if tag in RELEASE_SHORTCUTS:
            latest_release = repository.latest_release()
            validate_release_format(latest_release)
            tag = bump_version(latest_release, tag)

        release = repository.create_release(tag, target, name, body, draft,
                                            prerelease)
        if release:
            module.exit_json(tag=release.tag_name)
        else:
            module.exit_json(tag=None)
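
# A minimal sketch of the get_repository() helper used above, assuming the
# github3.py client created earlier; it returns None when the repository
# cannot be fetched (illustrative, not the module's verbatim code).
def get_repository(gh_obj, user, repo):
    try:
        return gh_obj.repository(user, repo)
    except github3.exceptions.GitHubError:
        return None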
Exemple #46
0
class NetAppOntapVolumeClone(object):
    """
        Creates a volume clone
    """
    def __init__(self):
        """
            Initialize the NetAppOntapVolumeClone class
        """
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(
            dict(
                state=dict(required=False,
                           choices=['present'],
                           default='present'),
                parent_volume=dict(required=True, type='str'),
                volume=dict(required=True, type='str'),
                vserver=dict(required=True, type='str'),
                parent_snapshot=dict(required=False, type='str', default=None),
                parent_vserver=dict(required=False, type='str', default=None),
                qos_policy_group_name=dict(required=False,
                                           type='str',
                                           default=None),
                space_reserve=dict(required=False,
                                   choices=['volume', 'none'],
                                   default=None),
                volume_type=dict(required=False, choices=['rw', 'dp']),
            ))

        self.module = AnsibleModule(argument_spec=self.argument_spec,
                                    supports_check_mode=True)

        parameters = self.module.params

        # set up state variables
        self.state = parameters['state']
        self.parent_snapshot = parameters['parent_snapshot']
        self.parent_volume = parameters['parent_volume']
        self.parent_vserver = parameters['parent_vserver']
        self.qos_policy_group_name = parameters['qos_policy_group_name']
        self.space_reserve = parameters['space_reserve']
        self.volume = parameters['volume']
        self.volume_type = parameters['volume_type']
        self.vserver = parameters['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(
                msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(
                module=self.module, vserver=self.vserver)
        return

    def create_volume_clone(self):
        """
        Creates a new volume clone
        """
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-create')
        clone_obj.add_new_child("parent-volume", self.parent_volume)
        clone_obj.add_new_child("volume", self.volume)
        if self.qos_policy_group_name:
            clone_obj.add_new_child("qos-policy-group-name",
                                    self.qos_policy_group_name)
        if self.space_reserve:
            clone_obj.add_new_child("space-reserve", self.space_reserve)
        if self.parent_snapshot:
            clone_obj.add_new_child("parent-snapshot", self.parent_snapshot)
        if self.parent_vserver:
            clone_obj.add_new_child("parent-vserver", self.parent_vserver)
        if self.volume_type:
            clone_obj.add_new_child("volume-type", self.volume_type)
        self.server.invoke_successfully(clone_obj, True)

    def does_volume_clone_exists(self):
        clone_obj = netapp_utils.zapi.NaElement('volume-clone-get')
        clone_obj.add_new_child("volume", self.volume)
        try:
            results = self.server.invoke_successfully(clone_obj, True)
        except Exception:
            return False
        attributes = results.get_child_by_name('attributes')
        info = attributes.get_child_by_name('volume-clone-info')
        parent_volume = info.get_child_content('parent-volume')
        if parent_volume == self.parent_volume:
            return True
        self.module.fail_json(
            msg="Error clone %s already exists for parent %s" %
            (self.volume, parent_volume))

    def apply(self):
        """
        Run Module based on play book
        """
        changed = False
        netapp_utils.ems_log_event("na_ontap_volume_clone", self.server)
        existing_volume_clone = self.does_volume_clone_exists()
        if existing_volume_clone is False:  # create clone
            changed = True
        if changed:
            if self.module.check_mode:
                pass
            else:
                self.create_volume_clone()

        self.module.exit_json(changed=changed)
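

# A minimal sketch (not shown in the original excerpt) of the usual entry
# point for a class-based module like the volume-clone class above.
# NetAppONTAPVolumeClone is an assumed name; the real class name is defined
# earlier in the module, outside this excerpt.
def main():
    clone = NetAppONTAPVolumeClone()  # assumed name for the class above
    clone.apply()


if __name__ == '__main__':
    main()
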
class NSXTBaseRealizableResource(ABC):
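    """
    Base class for NSX-T policy resources managed through Ansible.

    Subclasses describe a single resource type (its argument spec and REST
    base URL); this class builds the combined Ansible arg spec, talks to the
    NSX manager via PolicyCommunicator, and realizes 'present'/'absent'
    state for the resource and its nested sub-resources.
    """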

    INCORRECT_ARGUMENT_NAME_VALUE = "error_invalid_parameter"

    def realize(self, supports_check_mode=True,
                successful_resource_exec_logs=[],
                baseline_arg_names=[], resource_params=None):
        # Must be called to realize the creation, update, or deletion of the
        # resource.

        self.resource_class = self.__class__

        if not hasattr(self, "_arg_spec"):
            # Base resource
            self._make_ansible_arg_spec(
                supports_check_mode=supports_check_mode)

        if not hasattr(self, 'module'):
            self.module = AnsibleModule(
                argument_spec=self._arg_spec,
                supports_check_mode=supports_check_mode)

            self.set_baseline_args(baseline_arg_names)

        # Infer manager credentials
        mgr_hostname = self.module.params['hostname']
        mgr_username = self.module.params['username']
        mgr_password = self.module.params['password']
        nsx_cert_path = self.module.params['nsx_cert_path']
        nsx_key_path = self.module.params['nsx_key_path']

        request_headers = self.module.params['request_headers']
        ca_path = self.module.params['ca_path']
        validate_certs = self.module.params['validate_certs']

        # Each manager has an associated PolicyCommunicator
        self.policy_communicator = PolicyCommunicator.get_instance(
            mgr_hostname, mgr_username, mgr_password, nsx_cert_path,
            nsx_key_path, request_headers, ca_path, validate_certs)

        if resource_params is None:
            resource_params = self.module.params

        self.resource_params = resource_params

        self._state = self.get_attribute('state', resource_params)
        if not (hasattr(self, 'id') and self.id):
            if self.get_resource_name() in BASE_RESOURCES:
                self.id = self._get_id_using_attr_name(
                    None, resource_params,
                    self.get_resource_base_url(self.baseline_args),
                    self.get_spec_identifier())
            else:
                self.id = self._get_id_using_attr_name(
                    None, resource_params,
                    self.get_resource_base_url(self._parent_info),
                    self.get_spec_identifier())

        if self.id is None:
            return

        # Extract the resource params from module
        self.nsx_resource_params = self._extract_nsx_resource_params(
            resource_params)

        # parent_info is passed to subresources of a resource automatically
        if not hasattr(self, "_parent_info"):
            self._parent_info = {}
        self.update_parent_info(self._parent_info)

        try:
            # get existing resource schema
            _, self.existing_resource = self._send_request_to_API(
                "/" + self.id, ignore_error=False,
                accepted_error_codes=set([404]))
            # As the Policy API's PATCH requires all attributes to be filled,
            # we fill the missing resource params (the params not specified
            # by the user) using the existing params
            self._fill_missing_resource_params(
                self.existing_resource, self.nsx_resource_params)
        except Exception as err:
            # the resource does not exist currently on the manager
            self.existing_resource = None
        self._achieve_state(resource_params, successful_resource_exec_logs)

    @classmethod
    def get_spec_identifier(cls):
        # Can be overridden in the subclass to provide a different
        # unique_arg_identifier. It is used to infer which args belong to
        # which subresource.
        # By default, the class name is used for subresources.
        return cls.get_resource_name()

    def get_state(self):
        return self._state

    def get_parent_info(self):
        return self._parent_info

    @staticmethod
    @abstractmethod
    def get_resource_base_url(parent_info):
        # Must be overridden by the subclass
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_resource_spec():
        # Must be overridden by the subclass
        raise NotImplementedError

    @classmethod
    def get_resource_name(cls):
        return cls.__name__

    def create_or_update_subresource_first(self):
        # return True if subresource should be created/updated before parent
        # resource
        return self.resource_params.get(
            "create_or_update_subresource_first", False)

    def delete_subresource_first(self):
        # return True if subresource should be deleted before parent resource
        return self.resource_params.get("delete_subresource_first", True)

    def achieve_subresource_state_if_del_parent(self):
        # return True if this resource is to be realized with its own specified
        # state irrespective of the state of its parent resource.
        return self.resource_params.get(
            "achieve_subresource_state_if_del_parent", False)

    def do_wait_till_create(self):
        # By default, we do not wait for the parent resource to be created or
        # updated before its subresource is to be realized.
        return self.resource_params.get("do_wait_till_create", False)

    @staticmethod
    def get_resource_update_priority():
        # This priority can be used to create/delete subresources at the same
        # level in a particular order.
        # By default it returns 1, so resources are created/updated/deleted in
        # an arbitrary (but consistent) order.
        # Override in the subclass to specify a different priority.
        # For creation or update, we iterate in descending order of priority;
        # for deletion, in ascending order.
        return 1
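
    # Illustrative only (not from the original code): a subresource that must
    # be realized before its siblings on create/update, and after them on
    # delete, could override the hook above like this:
    #
    #     @staticmethod
    #     def get_resource_update_priority():
    #         return 2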

    def set_arg_spec(self, arg_spec):
        self._arg_spec = arg_spec

    def set_ansible_module(self, ansible_module):
        self.module = ansible_module

    def set_parent_info(self, parent_info):
        self._parent_info = parent_info

    def achieve_subresource_state(
            self, resource_params, successful_resource_exec_logs):
        """
            Achieve the state of each sub-resource.
        """
        for sub_resource_class in self._get_sub_resources_class_of(
                self.resource_class):
            if sub_resource_class.allows_multiple_resource_spec():
                children_resource_spec = (resource_params.get(
                    sub_resource_class.get_spec_identifier()) or [])
            else:
                children_resource_spec = ([resource_params.get(
                    sub_resource_class.get_spec_identifier())] or [])

            # Update the parent pointer
            my_parent = self._parent_info.get('_parent', '')
            self._update_parent_info()

            for resource_param_spec in children_resource_spec:
                if resource_param_spec is not None:
                    sub_resource = sub_resource_class()

                    sub_resource.set_arg_spec(self._arg_spec)
                    sub_resource.set_ansible_module(self.module)

                    sub_resource.set_parent_info(self._parent_info)

                    sub_resource.realize(
                        successful_resource_exec_logs=(
                            successful_resource_exec_logs),
                        resource_params=resource_param_spec)

            # Restore the parent pointer
            self._parent_info['_parent'] = my_parent

    def update_resource_params(self, nsx_resource_params):
        # Can be used to update the params of the resource before making
        # the API call.
        # Should be overridden in the subclass if needed.
        pass

    def check_for_update(self, existing_params, resource_params):
        """
            resource_params: dict
            existing_params: dict

            Compares the existing_params with resource_params and returns
            True if they are different. At a base level, it traverses the
            params and matches one-to-one. If the value to be matched is a
            - dict, it traverses that also.
            - list, it merely compares the order.
            Can be overriden in the subclass for specific custom checking.

            Returns true if the params differ
        """
        if not existing_params:
            return False
        for k, v in resource_params.items():
            if k not in existing_params:
                return True
            elif type(v).__name__ == 'dict':
                if self.check_for_update(existing_params[k], v):
                    return True
            elif v != existing_params[k]:
                def compare_lists(list1, list2):
                    # Returns True if list1 and list2 differ
                    try:
                        # If the lists can be converted into sets, do so and
                        # compare lists as sets.
                        set1 = set(list1)
                        set2 = set(list2)
                        return set1 != set2
                    except Exception:
                        return True
                if type(v).__name__ == 'list':
                    if compare_lists(v, existing_params[k]):
                        return True
                    continue
                return True
        return False
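
    # Illustrative only: with the rules above,
    #   check_for_update({'display_name': 't1', 'tags': ['a', 'b']},
    #                    {'display_name': 't1', 'tags': ['b', 'a']})
    # returns False because lists are compared as sets, while a changed value
    # or a key missing from existing_params returns True.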

    def update_parent_info(self, parent_info):
        # Override this to add entries to parent_info that should be passed
        # to the sub-resources.
        # By default, the parent's id is passed.
        parent_info[self.get_spec_identifier() + "_id"] = self.id

    def get_attribute(self, attribute, resource_params):
        """
            attribute: String
            resource_params: Parameters of the resource
        """
        if (attribute == "state" and
                self.get_resource_name() not in BASE_RESOURCES):
            # If the parent has state 'absent', its subresources should also
            # have state 'absent'. So, irrespective of what the user
            # specifies, if the parent is to be deleted, the child resources
            # will be deleted as well.
            # Override achieve_subresource_state_if_del_parent in the
            # resource class to change this behavior.
            if (self._parent_info["_parent"].get_state() == "absent" and
                    not self.achieve_subresource_state_if_del_parent()):
                return "absent"
        return resource_params.get(
            attribute, self.INCORRECT_ARGUMENT_NAME_VALUE)

    def set_baseline_args(self, baseline_arg_names):
        # Can be overridden in the subclass
        self.baseline_args = {}
        for baseline_arg_name in baseline_arg_names:
            self.baseline_args[baseline_arg_name] = self.module.params[
                baseline_arg_name]

    def do_resource_params_have_attr_with_id_or_display_name(self, attr):
        if (attr + "_id" in self.nsx_resource_params or
                attr + "_display_name" in self.nsx_resource_params):
            return True
        return False

    def get_id_using_attr_name_else_fail(self, attr_name, params,
                                         resource_base_url, resource_type,
                                         ignore_not_found_error=True):
        resource_id = self._get_id_using_attr_name(
            attr_name, params, resource_base_url, resource_type,
            ignore_not_found_error)
        if resource_id is not None:
            return resource_id
        # Incorrect usage of Ansible Module
        self.module.fail_json(msg="Please specify either {} id or display_name"
                              " for the resource {}".format(
                                  attr_name, str(resource_type)))

    def exit_with_failure(self, msg, **kwargs):
        self.module.fail_json(msg=msg, **kwargs)

    def skip_delete(self):
        """
        Override in subclass if this resource is skipped to be deleted.
        Note that the children of this resource will still be deleted unless
        they override this method as well.
        """
        return False

    @classmethod
    def is_required_in_spec(cls):
        """
        Override in subclass if this resource is optional to be specified
        in the ansible playbook.
        """
        return False

    @classmethod
    def allows_multiple_resource_spec(cls):
        """
        Override in the resource class definition with False if only one
        resource can be associated with the parent. By default, we accept
        multiple
        """
        return True

    def _get_id_using_attr_name(self, attr_name, params,
                                resource_base_url, resource_type,
                                ignore_not_found_error=True):
        # Pass attr_name '' or None to infer base resource's ID
        id_identifier = 'id'
        display_name_identifier = 'display_name'
        if attr_name:
            id_identifier = attr_name + "_id"
            display_name_identifier = attr_name + "_display_name"
        if id_identifier in params and params[id_identifier]:
            return params.pop(id_identifier)
        if (display_name_identifier in params and
                params[display_name_identifier]):
            resource_display_name = params.pop(display_name_identifier)
            # Try to resolve the id from the display_name; fall back to the
            # display_name itself if it cannot be resolved.
            return (self.get_id_from_display_name(
                resource_base_url, resource_display_name, resource_type,
                ignore_not_found_error) or resource_display_name)

    def get_id_from_display_name(self, resource_base_url,
                                 resource_display_name,
                                 resource_type, ignore_not_found_error=True):
        try:
            # Get the id from the Manager
            (_, resp) = self._send_request_to_API(
                resource_base_url=resource_base_url)
            matched_resource = None
            for resource in resp['results']:
                if ('display_name' in resource and
                        resource['display_name'] == resource_display_name):
                    if matched_resource is None:
                        matched_resource = resource
                    else:
                        # Multiple resources with same display_name!
                        # Ask the user to specify ID instead.
                        self.module.fail_json(
                            msg="Multiple {} found with display_name {}. "
                                "Please specify the resource using id in "
                                "the playbook.".format(resource_type,
                                                       resource_display_name))
            if matched_resource is not None:
                return matched_resource['id']
            else:
                if ignore_not_found_error:
                    return None
                else:
                    # No resource found with this display_name
                    self.module.fail_json(
                        msg="No {} found with display_name {} for the "
                            "specified configuration.".format(
                                resource_type, resource_display_name))
        except Exception as e:
            # Manager replied with invalid URL. It means that the resource
            # does not exist on the Manager. So, return the display_name
            return resource_display_name

    def _update_parent_info(self):
        # This update is always performed and should not be overridden by the
        # subresource's class
        self._parent_info["_parent"] = self

    def _make_ansible_arg_spec(self, supports_check_mode=True):
        """
            We read the arg_spec of all the resources that are associated
            with this resource and create one complete arg_spec.
        """
        if self.get_resource_name() in BASE_RESOURCES:
            self._arg_spec = {}
            # Update it with VMware arg spec
            self._arg_spec.update(
                PolicyCommunicator.get_vmware_argument_spec())

            # ... then update it with top most resource spec ...
            self._update_arg_spec_with_resource(
                self.resource_class, self._arg_spec)
            # Update with all sub-resources arg spec
            for sub_resources_class in self._get_sub_resources_class_of(
                    self.resource_class):
                self._update_arg_spec_with_all_resources(
                    sub_resources_class, self._arg_spec)

    def _update_arg_spec_with_resource(self, resource_class, arg_spec):
        # updates _arg_spec with resource_class's arg_spec
        resource_arg_spec = self._get_base_arg_spec_of_resource()
        resource_arg_spec.update(self._get_base_arg_spec_of_nsx_resource())
        resource_arg_spec.update(resource_class.get_resource_spec())
        if resource_class.__name__ not in BASE_RESOURCES:
            arg_spec.update(
                {
                    resource_class.get_spec_identifier(): dict(
                        options=resource_arg_spec,
                        required=resource_class.is_required_in_spec(),
                        type='dict',
                    )
                })
            if resource_class.allows_multiple_resource_spec():
                arg_spec[resource_class.get_spec_identifier()]['type'] = 'list'
                arg_spec[resource_class.get_spec_identifier()]['elements'] = (
                    'dict')
        else:
            arg_spec.update(resource_arg_spec)
        return resource_arg_spec

    def _update_arg_spec_with_all_resources(self, resource_class, arg_spec):
        # updates _arg_spec with resource_class's arg_spec and all of its
        # sub-resources
        resource_arg_spec = self._update_arg_spec_with_resource(
            resource_class, arg_spec)
        # go to each child of resource_class and update it
        for sub_resources_class in self._get_sub_resources_class_of(
                resource_class):
            self._update_arg_spec_with_all_resources(
                sub_resources_class, resource_arg_spec)

    def _get_base_arg_spec_of_nsx_resource(self):
        resource_base_arg_spec = {}
        resource_base_arg_spec.update(
            # these are the base args for any NSXT Resource
            display_name=dict(
                required=False,
                type='str'
            ),
            description=dict(
                required=False,
                type='str'
            ),
            tags=dict(
                required=False,
                type='list',
                elements='dict',
                options=dict(
                    scope=dict(
                        required=True,
                        type='str'
                    ),
                    tag=dict(
                        required=True,
                        type='str'
                    )
                )
            )
        )
        return resource_base_arg_spec

    def _get_base_arg_spec_of_resource(self):
        resource_base_arg_spec = {}
        resource_base_arg_spec.update(
            id=dict(
                type='str'
            ),
            state=dict(
                required=True,
                type='str',
                choices=['present', 'absent']
            ),
            create_or_update_subresource_first=dict(
                default=False,
                type='bool'
            ),
            delete_subresource_first=dict(
                default=True,
                type='bool'
            ),
            achieve_subresource_state_if_del_parent=dict(
                default=False,
                type='bool'
            ),
            do_wait_till_create=dict(
                default=False,
                type='bool'
            )
        )
        return resource_base_arg_spec

    def _extract_nsx_resource_params(self, resource_params):
        # extract the params belonging to this resource only.
        filtered_params = {}

        def filter_with_spec(spec):
            for key in spec.keys():
                if (key in resource_params and
                        resource_params[key] is not None):
                    filtered_params[key] = resource_params[key]

        filter_with_spec(self.get_resource_spec())
        filter_with_spec(self._get_base_arg_spec_of_nsx_resource())
        return filtered_params

    def _achieve_present_state(self, successful_resource_exec_logs):
        self.update_resource_params(self.nsx_resource_params)
        is_resource_updated = self.check_for_update(
            self.existing_resource, self.nsx_resource_params)
        if not is_resource_updated:
            # Either the resource does not exist or it exists but was not
            # updated in the YAML.
            if self.module.check_mode:
                successful_resource_exec_logs.append({
                    "changed": True,
                    "debug_out": self.resource_params,
                    "id": '12345',
                    "resource_type": self.get_resource_name()
                })
                return
            try:
                if self.existing_resource:
                    # Resource already exists
                    successful_resource_exec_logs.append({
                        "changed": False,
                        "id": self.id,
                        "message": "%s with id %s already exists." %
                        (self.get_resource_name(), self.id),
                        "resource_type": self.get_resource_name()
                    })
                    return
                # Create a new resource
                _, resp = self._send_request_to_API(
                    suffix="/" + self.id, method='PATCH',
                    data=self.nsx_resource_params)
                if self.do_wait_till_create() and not self._wait_till_create():
                    raise Exception

                successful_resource_exec_logs.append({
                    "changed": True,
                    "id": self.id,
                    "body": str(resp),
                    "message": "%s with id %s created." %
                    (self.get_resource_name(), self.id),
                    "resource_type": self.get_resource_name()
                })
            except Exception as err:
                srel = successful_resource_exec_logs
                self.module.fail_json(msg="Failed to add %s with id %s."
                                          "Request body [%s]. Error[%s]."
                                          % (self.get_resource_name(),
                                             self.id, self.nsx_resource_params,
                                             to_native(err)
                                             ),
                                      successfully_updated_resources=srel)
        else:
            # The resource exists and was updated in the YAML.
            if self.module.check_mode:
                successful_resource_exec_logs.append({
                    "changed": True,
                    "debug_out": self.resource_params,
                    "id": self.id,
                    "resource_type": self.get_resource_name()
                })
                return
            self.nsx_resource_params['_revision'] = \
                self.existing_resource['_revision']
            try:
                _, resp = self._send_request_to_API(
                    suffix="/"+self.id, method="PATCH",
                    data=self.nsx_resource_params)
                successful_resource_exec_logs.append({
                    "changed": True,
                    "id": self.id,
                    "body": str(resp),
                    "message": "%s with id %s updated." %
                    (self.get_resource_name(), self.id),
                    "resource_type": self.get_resource_name()
                })
            except Exception as err:
                srel = successful_resource_exec_logs
                self.module.fail_json(msg="Failed to update %s with id %s."
                                          "Request body [%s]. Error[%s]." %
                                          (self.get_resource_name(), self.id,
                                           self.nsx_resource_params, to_native(
                                               err)
                                           ),
                                      successfully_updated_resources=srel)

    def _achieve_absent_state(self, successful_resource_exec_logs):
        if self.skip_delete():
            return

        if self.existing_resource is None:
            successful_resource_exec_logs.append({
                "changed": False,
                "msg": 'No %s exist with id %s' %
                (self.get_resource_name(), self.id),
                "resource_type": self.get_resource_name()
            })
            return
        if self.module.check_mode:
            successful_resource_exec_logs.append({
                "changed": True,
                "debug_out": self.resource_params,
                "id": self.id,
                "resource_type": self.get_resource_name()
            })
            return
        try:
            self._send_request_to_API("/" + self.id, method='DELETE')
            self._wait_till_delete()
            successful_resource_exec_logs.append({
                "changed": True,
                "id": self.id,
                "message": "%s with id %s deleted." %
                (self.get_resource_name(), self.id)
            })
        except Exception as err:
            srel = successful_resource_exec_logs
            self.module.fail_json(msg="Failed to delete %s with id %s. "
                                      "Error[%s]." % (self.get_resource_name(),
                                                      self.id, to_native(err)),
                                  successfully_updated_resources=srel)

    def _send_request_to_API(self, suffix="", ignore_error=False,
                             method='GET', data=None,
                             resource_base_url=None,
                             accepted_error_codes=set()):
        try:
            if not resource_base_url:
                if self.get_resource_name() not in BASE_RESOURCES:
                    resource_base_url = (self.resource_class.
                                         get_resource_base_url(
                                             parent_info=self._parent_info))
                else:
                    resource_base_url = (self.resource_class.
                                         get_resource_base_url(
                                             baseline_args=self.baseline_args))
            (rc, resp) = self.policy_communicator.request(
                resource_base_url + suffix,
                ignore_errors=ignore_error, method=method, data=data)
            return (rc, resp)
        except DuplicateRequestError:
            self.module.fail_json(msg='Duplicate request')
        except Exception as e:
            if (e.args[0] not in accepted_error_codes and
                    self.get_resource_name() in BASE_RESOURCES):
                msg = ('Received {} from NSX Manager. Please try '
                       'again. '.format(e.args[0]))
                if len(e.args) == 2 and e.args[1] and (
                        'error_message' in e.args[1]):
                    msg += e.args[1]['error_message']
                self.module.fail_json(msg=msg)
            raise e

    def _achieve_state(self, resource_params,
                       successful_resource_exec_logs=[]):
        """
            Achieves `present` or `absent` state as specified in the YAML.
        """
        if self.id == self.INCORRECT_ARGUMENT_NAME_VALUE:
            # The resource was not specified in the YAML.
            # So, no need to realize it.
            return
        if (self._state == "present" and
                self.create_or_update_subresource_first()):
            self.achieve_subresource_state(
                resource_params, successful_resource_exec_logs)
        if self._state == "absent" and self.delete_subresource_first():
            self.achieve_subresource_state(
                resource_params, successful_resource_exec_logs)

        if self._state == 'present':
            self._achieve_present_state(
                successful_resource_exec_logs)
        else:
            self._achieve_absent_state(successful_resource_exec_logs)

        if self._state == "present" and not (
                self.create_or_update_subresource_first()):
            self.achieve_subresource_state(
                resource_params,
                successful_resource_exec_logs=successful_resource_exec_logs)

        if self._state == "absent" and not self.delete_subresource_first():
            self.achieve_subresource_state(
                resource_params, successful_resource_exec_logs)

        if self.get_resource_name() in BASE_RESOURCES:
            changed = False
            for successful_resource_exec_log in successful_resource_exec_logs:
                if successful_resource_exec_log["changed"]:
                    changed = True
                    break
            srel = successful_resource_exec_logs
            self.module.exit_json(changed=changed,
                                  successfully_updated_resources=srel)

    def _get_sub_resources_class_of(self, resource_class):
        subresources = []
        for attr in resource_class.__dict__.values():
            if (inspect.isclass(attr) and
                    issubclass(attr, NSXTBaseRealizableResource)):
                subresources.append(attr)
        if hasattr(self, "_state") and self._state == "present":
            subresources.sort(key=lambda subresource:
                              subresource().get_resource_update_priority(),
                              reverse=True)
        else:
            subresources.sort(key=lambda subresource:
                              subresource().get_resource_update_priority(),
                              reverse=False)
        for subresource in subresources:
            yield subresource

    def _wait_till_delete(self):
        """
            Periodically checks if the resource still exists on the API server
            every 10 seconds. Returns after it has been deleted.
        """
        while True:
            try:
                self._send_request_to_API(
                    "/" + self.id, accepted_error_codes=set([404]))
                time.sleep(10)
            except DuplicateRequestError:
                self.module.fail_json(msg='Duplicate request')
            except Exception:
                return

    def _wait_till_create(self):
        FAILED_STATES = ["failed"]
        IN_PROGRESS_STATES = ["pending", "in_progress"]
        SUCCESS_STATES = ["partial_success", "success"]
        try:
            count = 0
            while True:
                rc, resp = self._send_request_to_API(
                    "/" + self.id, accepted_error_codes=set([404]))
                if 'state' in resp:
                    if resp['state'] in IN_PROGRESS_STATES:
                        time.sleep(10)
                        count = count + 1
                        if count == 90:
                            # Wait for max 15 minutes for host to realize
                            return False
                    elif resp['state'] in SUCCESS_STATES:
                        return True
                    else:
                        # Failed State
                        return False
                else:
                    if rc != 200:
                        time.sleep(1)
                        count = count + 1
                        if count == 90:
                            # Wait for a maximum of 90 seconds for the resource to realize
                            return False
                    else:
                        return True
        except Exception as err:
            return False

    def _fill_missing_resource_params(self, existing_params, resource_params):
        """
            resource_params: dict
            existing_params: dict

            Fills resource_params with the key:value from existing_params if
            missing in the former.
        """
        if not existing_params:
            return
        for k, v in existing_params.items():
            if k not in resource_params:
                resource_params[k] = v
            elif type(v).__name__ == 'dict':
                self._fill_missing_resource_params(v, resource_params[k])
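

# A minimal sketch (not from the original code) of how a concrete resource
# module might build on NSXTBaseRealizableResource. The class name, REST base
# URL, and spec field below are illustrative assumptions, and the class would
# also need to be listed in BASE_RESOURCES to act as a top-level resource.
class NSXTExampleSegment(NSXTBaseRealizableResource):

    @staticmethod
    def get_resource_base_url(baseline_args=None):
        # Base resources are called with baseline_args; subresources receive
        # parent_info instead (see _send_request_to_API above).
        return '/infra/segments'

    @staticmethod
    def get_resource_spec():
        # Resource-specific arguments only; the common args (id, state,
        # display_name, description, tags, ...) are merged in by
        # _make_ansible_arg_spec.
        return dict(
            transport_zone_path=dict(required=False, type='str'),
        )


def main():
    NSXTExampleSegment().realize()


if __name__ == '__main__':
    main()
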
Exemple #48
0
def main():
    """ Main entry point for Ansible module execution.
    """

    results = {}

    argument_spec = rubrik_argument_spec

    # Start Parameters
    argument_spec.update(
        dict(url=dict(required=True),
             wait_for_completion=dict(required=False,
                                      type='bool',
                                      default=True),
             timeout=dict(required=False, type='int', default=15)))
    # End Parameters

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)

    if sdk_present is False:
        module.fail_json(
            msg=
            "The Rubrik Python SDK is required for this module (pip install rubrik_cdm)."
        )

    load_provider_variables(module)
    ansible = module.params

    try:
        rubrik = rubrik_cdm.Connect()
    except SystemExit as error:
        if "has not been provided" in str(error):
            try:
                ansible["node_ip"]
                ansible["username"]
                ansible["password"]
            except KeyError:
                module.fail_json(
                    msg=
                    "Error: The Rubrik login credentials are missing. Verify the correct env vars are present or provide them through the provider param."
                )
        else:
            module.fail_json(msg=str(error))

        try:
            rubrik = rubrik_cdm.Connect(ansible['node_ip'],
                                        ansible['username'],
                                        ansible['password'])
        except SystemExit as error:
            module.fail_json(msg=str(error))

    try:
        api_request = rubrik.job_status(ansible["url"],
                                        ansible["wait_for_completion"],
                                        ansible["timeout"])
    except SystemExit as error:
        module.fail_json(msg=str(error))

    results["changed"] = False

    results["response"] = api_request

    module.exit_json(**results)
Exemple #49
0
def main():
    argument_spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    argument_spec.update(vyos_argument_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    warnings = list()
    check_args(module, warnings)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Subset must be one of [%s], got %s' %
                             (', '.join(VALID_SUBSETS), subset))

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    instances = list()
    for key in runable_subsets:
        instances.append(FACT_SUBSETS[key](module))

    for inst in instances:
        inst.populate()
        facts.update(inst.facts)

    ansible_facts = dict()
    for key, value in iteritems(facts):
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
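

# A self-contained sketch (not part of the module above) of the same
# gather_subset resolution logic, using a stand-in EXAMPLE_VALID_SUBSETS set
# to make the include/exclude behaviour explicit.
EXAMPLE_VALID_SUBSETS = frozenset(['default', 'config', 'interfaces', 'neighbors'])


def resolve_subsets(gather_subset, valid_subsets=EXAMPLE_VALID_SUBSETS):
    runable, exclude = set(), set()
    for subset in gather_subset:
        if subset == 'all':
            runable.update(valid_subsets)
            continue
        negate = subset.startswith('!')
        name = subset[1:] if negate else subset
        if name == 'all':
            exclude.update(valid_subsets)
            continue
        if name not in valid_subsets:
            raise ValueError('Subset must be one of %s, got %s'
                             % (sorted(valid_subsets), name))
        (exclude if negate else runable).add(name)
    if not runable:
        runable.update(valid_subsets)
    runable.difference_update(exclude)
    runable.add('default')
    return runable

# e.g. resolve_subsets(['!config']) -> every valid subset except 'config'
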
def main():
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(aliases=['host'], required=True),
        bond=dict(default=None, type='dict'),
        interface=dict(default=None),
        networks=dict(default=None, type='list'),
        labels=dict(default=None, type='list'),
        check=dict(default=None, type='bool'),
        save=dict(default=True, type='bool'),
        sync_networks=dict(default=False, type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    check_sdk(module)

    try:
        auth = module.params.pop('auth')
        connection = create_connection(auth)
        hosts_service = connection.system_service().hosts_service()
        host_networks_module = HostNetworksModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )

        host = host_networks_module.search_entity()
        if host is None:
            raise Exception("Host '%s' was not found." % module.params['name'])

        bond = module.params['bond']
        interface = module.params['interface']
        networks = module.params['networks']
        labels = module.params['labels']
        nic_name = bond.get('name') if bond else module.params['interface']

        host_service = hosts_service.host_service(host.id)
        nics_service = host_service.nics_service()
        nic = search_by_name(nics_service, nic_name)

        if module.params["sync_networks"]:
            if needs_sync(nics_service):
                if not module.check_mode:
                    host_service.sync_all_networks()
                host_networks_module.changed = True

        network_names = [network['name'] for network in networks or []]
        state = module.params['state']

        if (
            state == 'present' and
            (nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
        ):
            # Remove networks which are attached to a different interface than
            # the user wants:
            attachments_service = host_service.network_attachments_service()

            # Append the attachment ID to the network if it needs an update:
            for a in attachments_service.list():
                current_network_name = get_link_name(connection, a.network)
                if current_network_name in network_names:
                    for n in networks:
                        if n['name'] == current_network_name:
                            n['id'] = a.id

            # Check if we have to break some bonds:
            removed_bonds = []
            if nic is not None:
                for host_nic in nics_service.list():
                    if host_nic.bonding and nic.id in [slave.id for slave in host_nic.bonding.slaves]:
                        removed_bonds.append(otypes.HostNic(id=host_nic.id))

            # Assign the networks:
            setup_params = dict(
                entity=host,
                action='setup_networks',
                check_connectivity=module.params['check'],
                removed_bonds=removed_bonds if removed_bonds else None,
                modified_bonds=[
                    otypes.HostNic(
                        name=bond.get('name'),
                        bonding=otypes.Bonding(
                            options=get_bond_options(bond.get('mode'), bond.get('options')),
                            slaves=[
                                otypes.HostNic(name=i) for i in bond.get('interfaces', [])
                            ],
                        ),
                    ),
                ] if bond else None,
                modified_labels=[
                    otypes.NetworkLabel(
                        id=str(name),
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                    ) for name in labels
                ] if labels else None,
                modified_network_attachments=[
                    otypes.NetworkAttachment(
                        id=network.get('id'),
                        network=otypes.Network(
                            name=network['name']
                        ) if network['name'] else None,
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                        ip_address_assignments=[
                            otypes.IpAddressAssignment(
                                assignment_method=otypes.BootProtocol(
                                    network.get('boot_protocol', 'none')
                                ),
                                ip=otypes.Ip(
                                    address=network.get('address'),
                                    gateway=network.get('gateway'),
                                    netmask=network.get('netmask'),
                                    version=otypes.IpVersion(
                                        network.get('version')
                                    ) if network.get('version') else None,
                                ),
                            ),
                        ],
                        properties=[
                            otypes.Property(
                                name=prop.get('name'),
                                value=prop.get('value')
                            ) for prop in network.get('custom_properties')
                        ] if network.get('custom_properties') else None
                    ) for network in networks
                ] if networks else None,
            )
            if engine_supported(connection, '4.3'):
                setup_params['commit_on_success'] = module.params['save']
            elif module.params['save']:
                setup_params['post_action'] = host_networks_module._action_save_configuration
            host_networks_module.action(**setup_params)
        elif state == 'absent' and nic:
            attachments = []
            nic_service = nics_service.nic_service(nic.id)

            attached_labels = set([str(lbl.id) for lbl in nic_service.network_labels_service().list()])
            if networks:
                attachments_service = nic_service.network_attachments_service()
                attachments = attachments_service.list()
                attachments = [
                    attachment for attachment in attachments
                    if get_link_name(connection, attachment.network) in network_names
                ]

            # Remove unmanaged networks:
            unmanaged_networks_service = host_service.unmanaged_networks_service()
            unmanaged_networks = [(u.id, u.name) for u in unmanaged_networks_service.list()]
            for net_id, net_name in unmanaged_networks:
                if net_name in network_names:
                    if not module.check_mode:
                        unmanaged_networks_service.unmanaged_network_service(net_id).remove()
                    host_networks_module.changed = True

            # Need to check if there are any labels to be removed, as the backend
            # fails if we try to remove a non-existing label; for bonds and
            # attachments it's OK:
            if (labels and set(labels).intersection(attached_labels)) or bond or attachments:
                setup_params = dict(
                    entity=host,
                    action='setup_networks',
                    check_connectivity=module.params['check'],
                    removed_bonds=[
                        otypes.HostNic(
                            name=bond.get('name'),
                        ),
                    ] if bond else None,
                    removed_labels=[
                        otypes.NetworkLabel(id=str(name)) for name in labels
                    ] if labels else None,
                    removed_network_attachments=attachments if attachments else None,
                )
                if engine_supported(connection, '4.3'):
                    setup_params['commit_on_success'] = module.params['save']
                elif module.params['save']:
                    setup_params['post_action'] = host_networks_module._action_save_configuration
                host_networks_module.action(**setup_params)

        nic = search_by_name(nics_service, nic_name)
        module.exit_json(**{
            'changed': host_networks_module.changed,
            'id': nic.id if nic else None,
            'host_nic': get_dict_of_struct(nic),
        })
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        connection.close(logout=auth.get('token') is None)
Exemple #51
0
class ElementSWVlan(object):
    """ class to handle VLAN operations """

    def __init__(self):
        """
            Setup Ansible parameters and ElementSW connection
        """
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'],
                       default='present'),
            name=dict(required=False, type='str'),
            vlan_tag=dict(required=True, type='str'),
            svip=dict(required=False, type='str'),
            netmask=dict(required=False, type='str'),
            gateway=dict(required=False, type='str'),
            namespace=dict(required=False, type='bool'),
            attributes=dict(required=False, type='dict'),
            address_blocks=dict(required=False, type='list')
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )

        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        else:
            self.elem = netapp_utils.create_sf_connection(module=self.module)

        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)

        self.elementsw_helper = NaElementSWModule(self.elem)

        # add telemetry attributes
        if self.parameters['attributes'] is not None:
            self.parameters['attributes'].update(self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan'))
        else:
            self.parameters['attributes'] = self.elementsw_helper.set_element_attributes(source='na_elementsw_vlan')

    def validate_keys(self):
        """
            Validate if all required keys are present before creating
        """
        required_keys = ['address_blocks', 'svip', 'netmask', 'name']
        if all(item in self.parameters.keys() for item in required_keys) is False:
            self.module.fail_json(msg="One or more required fields %s for creating VLAN is missing"
                                      % required_keys)
        addr_blk_fields = ['start', 'size']
        for address in self.parameters['address_blocks']:
            if 'start' not in address or 'size' not in address:
                self.module.fail_json(msg="One or more required fields %s for address blocks is missing"
                                          % addr_blk_fields)

    def create_network(self):
        """
            Add VLAN
        """
        try:
            self.validate_keys()
            create_params = self.parameters.copy()
            for key in ['username', 'hostname', 'password', 'state', 'vlan_tag']:
                del create_params[key]
            self.elem.add_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **create_params)
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error creating VLAN %s"
                                  % self.parameters['vlan_tag'],
                                  exception=to_native(err))

    def delete_network(self):
        """
            Remove VLAN
        """
        try:
            self.elem.remove_virtual_network(virtual_network_tag=self.parameters['vlan_tag'])
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error deleting VLAN %s"
                                  % self.parameters['vlan_tag'],
                                  exception=to_native(err))

    def modify_network(self, modify):
        """
            Modify the VLAN
        """
        try:
            self.elem.modify_virtual_network(virtual_network_tag=self.parameters['vlan_tag'], **modify)
        except solidfire.common.ApiServerError as err:
            self.module.fail_json(msg="Error modifying VLAN %s"
                                  % self.parameters['vlan_tag'],
                                  exception=to_native(err))

    def get_network_details(self):
        """
            Check existing VLANs
            :return: vlan details if found, None otherwise
            :rtype: dict
        """
        vlans = self.elem.list_virtual_networks(virtual_network_tag=self.parameters['vlan_tag'])
        vlan_details = dict()
        for vlan in vlans.virtual_networks:
            if vlan is not None:
                vlan_details['name'] = vlan.name
                vlan_details['address_blocks'] = list()
                for address in vlan.address_blocks:
                    vlan_details['address_blocks'].append({
                        'start': address.start,
                        'size': address.size
                    })
                vlan_details['svip'] = vlan.svip
                vlan_details['gateway'] = vlan.gateway
                vlan_details['netmask'] = vlan.netmask
                vlan_details['namespace'] = vlan.namespace
                vlan_details['attributes'] = vlan.attributes
                return vlan_details
        return None

    def apply(self):
        """
            Call create / delete / modify vlan methods
        """
        network = self.get_network_details()
        # calling helper to determine action
        cd_action = self.na_helper.get_cd_action(network, self.parameters)
        modify = self.na_helper.get_modified_attributes(network, self.parameters)
        if cd_action == "create":
            self.create_network()
        elif cd_action == "delete":
            self.delete_network()
        elif modify:
            self.modify_network(modify)
        self.module.exit_json(changed=self.na_helper.changed)
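

# A minimal sketch (not shown in the original snippet) of the usual entry
# point for the ElementSWVlan module class above:
def main():
    vlan = ElementSWVlan()
    vlan.apply()


if __name__ == '__main__':
    main()
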
def main():
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            availability_domain=dict(type="str"),
            backup_policy_id=dict(type="str"),
            compartment_id=dict(type="str"),
            defined_tags=dict(type="dict"),
            display_name=dict(aliases=["name"], type="str"),
            freeform_tags=dict(type="dict"),
            kms_key_id=dict(type="str"),
            size_in_gbs=dict(type="int"),
            vpus_per_gb=dict(type="int"),
            source_details=dict(
                type="dict",
                options=dict(
                    type=dict(
                        type="str",
                        required=True,
                        choices=["bootVolumeBackup", "bootVolume", "bootVolumeReplica"],
                    ),
                    id=dict(type="str", required=True),
                ),
            ),
            is_auto_tune_enabled=dict(type="bool"),
            boot_volume_replicas=dict(
                type="list",
                elements="dict",
                options=dict(
                    display_name=dict(aliases=["name"], type="str"),
                    availability_domain=dict(type="str", required=True),
                ),
            ),
            boot_volume_id=dict(aliases=["id"], type="str"),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="boot_volume",
        service_client_class=BlockstorageClient,
        namespace="core",
    )

    result = dict(changed=False)

    if resource_helper.is_delete_using_name():
        result = resource_helper.delete_using_name()
    elif resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update_using_name():
        result = resource_helper.update_using_name()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()

    module.exit_json(**result)
Exemple #53
0
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(distribution_id=dict(required=False, type='str'),
             invalidation_id=dict(required=False, type='str'),
             origin_access_identity_id=dict(required=False, type='str'),
             domain_name_alias=dict(required=False, type='str'),
             all_lists=dict(required=False, default=False, type='bool'),
             distribution=dict(required=False, default=False, type='bool'),
             distribution_config=dict(required=False,
                                      default=False,
                                      type='bool'),
             origin_access_identity=dict(required=False,
                                         default=False,
                                         type='bool'),
             origin_access_identity_config=dict(required=False,
                                                default=False,
                                                type='bool'),
             invalidation=dict(required=False, default=False, type='bool'),
             streaming_distribution=dict(required=False,
                                         default=False,
                                         type='bool'),
             streaming_distribution_config=dict(required=False,
                                                default=False,
                                                type='bool'),
             list_origin_access_identities=dict(required=False,
                                                default=False,
                                                type='bool'),
             list_distributions=dict(required=False,
                                     default=False,
                                     type='bool'),
             list_distributions_by_web_acl_id=dict(required=False,
                                                   default=False,
                                                   type='bool'),
             list_invalidations=dict(required=False,
                                     default=False,
                                     type='bool'),
             list_streaming_distributions=dict(required=False,
                                               default=False,
                                               type='bool'),
             summary=dict(required=False, default=False, type='bool')))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    is_old_facts = module._name == 'cloudfront_facts'
    if is_old_facts:
        module.deprecate(
            "The 'cloudfront_facts' module has been renamed to 'cloudfront_info', "
            "and the renamed one no longer returns ansible_facts",
            version='2.13')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    service_mgr = CloudFrontServiceManager(module)

    distribution_id = module.params.get('distribution_id')
    invalidation_id = module.params.get('invalidation_id')
    origin_access_identity_id = module.params.get('origin_access_identity_id')
    web_acl_id = module.params.get('web_acl_id')
    domain_name_alias = module.params.get('domain_name_alias')
    all_lists = module.params.get('all_lists')
    distribution = module.params.get('distribution')
    distribution_config = module.params.get('distribution_config')
    origin_access_identity = module.params.get('origin_access_identity')
    origin_access_identity_config = module.params.get(
        'origin_access_identity_config')
    invalidation = module.params.get('invalidation')
    streaming_distribution = module.params.get('streaming_distribution')
    streaming_distribution_config = module.params.get(
        'streaming_distribution_config')
    list_origin_access_identities = module.params.get(
        'list_origin_access_identities')
    list_distributions = module.params.get('list_distributions')
    list_distributions_by_web_acl_id = module.params.get(
        'list_distributions_by_web_acl_id')
    list_invalidations = module.params.get('list_invalidations')
    list_streaming_distributions = module.params.get(
        'list_streaming_distributions')
    summary = module.params.get('summary')

    aliases = []
    result = {'cloudfront': {}}
    facts = {}

    require_distribution_id = (distribution or distribution_config
                               or invalidation or streaming_distribution
                               or streaming_distribution_config
                               or list_invalidations)

    # set default to summary if no option specified
    summary = summary or not (
        distribution or distribution_config or origin_access_identity or
        origin_access_identity_config or invalidation or streaming_distribution
        or streaming_distribution_config or list_origin_access_identities
        or list_distributions_by_web_acl_id or list_invalidations
        or list_streaming_distributions or list_distributions)

    # validations
    if require_distribution_id and distribution_id is None and domain_name_alias is None:
        module.fail_json(
            msg='Error: distribution_id or domain_name_alias has not been specified.')
    if invalidation and invalidation_id is None:
        module.fail_json(msg='Error: invalidation_id has not been specified.')
    if ((origin_access_identity or origin_access_identity_config)
            and origin_access_identity_id is None):
        module.fail_json(
            msg='Error: origin_access_identity_id has not been specified.')
    if list_distributions_by_web_acl_id and web_acl_id is None:
        module.fail_json(msg='Error: web_acl_id has not been specified.')

    # get distribution id from domain name alias
    if require_distribution_id and distribution_id is None:
        distribution_id = service_mgr.get_distribution_id_from_domain_name(
            domain_name_alias)
        if not distribution_id:
            module.fail_json(
                msg='Error: unable to source a distribution id from domain_name_alias.')

    # set appropriate cloudfront id
    if distribution_id and not list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
        if invalidation_id:
            facts.update({invalidation_id: {}})
    elif distribution_id and list_invalidations:
        facts = {distribution_id: {}}
        aliases = service_mgr.get_aliases_from_distribution_id(distribution_id)
        for alias in aliases:
            facts.update({alias: {}})
    elif origin_access_identity_id:
        facts = {origin_access_identity_id: {}}
    elif web_acl_id:
        facts = {web_acl_id: {}}

    # get details based on options
    if distribution:
        facts_to_set = service_mgr.get_distribution(distribution_id)
    if distribution_config:
        facts_to_set = service_mgr.get_distribution_config(distribution_id)
    if origin_access_identity:
        facts[origin_access_identity_id].update(
            service_mgr.get_origin_access_identity(origin_access_identity_id))
    if origin_access_identity_config:
        facts[origin_access_identity_id].update(
            service_mgr.get_origin_access_identity_config(
                origin_access_identity_id))
    if invalidation:
        facts_to_set = service_mgr.get_invalidation(distribution_id,
                                                    invalidation_id)
        facts[invalidation_id].update(facts_to_set)
    if streaming_distribution:
        facts_to_set = service_mgr.get_streaming_distribution(distribution_id)
    if streaming_distribution_config:
        facts_to_set = service_mgr.get_streaming_distribution_config(
            distribution_id)
    if list_invalidations:
        facts_to_set = {
            'invalidations': service_mgr.list_invalidations(distribution_id)
        }
    if 'facts_to_set' in vars():
        facts = set_facts_for_distribution_id_and_alias(
            facts_to_set, facts, distribution_id, aliases)

    # get list based on options
    if all_lists or list_origin_access_identities:
        facts['origin_access_identities'] = service_mgr.list_origin_access_identities()
    if all_lists or list_distributions:
        facts['distributions'] = service_mgr.list_distributions()
    if all_lists or list_streaming_distributions:
        facts['streaming_distributions'] = service_mgr.list_streaming_distributions()
    if list_distributions_by_web_acl_id:
        facts['distributions_by_web_acl_id'] = service_mgr.list_distributions_by_web_acl_id(web_acl_id)
    if list_invalidations:
        facts['invalidations'] = service_mgr.list_invalidations(distribution_id)

    # default summary option
    if summary:
        facts['summary'] = service_mgr.summary()

    result['changed'] = False
    result['cloudfront'].update(facts)
    if is_old_facts:
        module.exit_json(msg="Retrieved CloudFront facts.",
                         ansible_facts=result)
    else:
        module.exit_json(msg="Retrieved CloudFront info.", **result)
Exemple #54
0
            if _private_true:
                if _private_true:
                    try:
                        import ansible.module_utils.stillbogus as nope2
                    except ImportError:
                        raise
except ImportError:
    nope2 = None

try:
    # optional import from a valid collection with an invalid package
    from ansible_collections.testns.testcoll.plugins.module_utils.bogus import collnope1
except ImportError:
    collnope1 = None

try:
    # optional import from a bogus collection
    from ansible_collections.bogusns.boguscoll.plugins.module_utils.bogus import collnope2
except ImportError:
    collnope2 = None

module = AnsibleModule(argument_spec={})

if not all([yep1, yep2, yep3]):
    module.fail_json(msg='one or more existing optional imports did not resolve')

if any([fromnope1, fromnope2, nope1, nope2, collnope1, collnope2]):
    module.fail_json(msg='one or more missing optional imports resolved unexpectedly')

module.exit_json(msg='all missing optional imports behaved as expected')
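The top of this snippet is truncated: the yep*/nope*/fromnope* names it checks are presumably bound earlier by guarded imports. A sketch of that pattern, assuming it mirrors the guarded imports still visible above (the module paths here are placeholders, not the test's real targets):

try:
    # optional import that is expected to resolve on any Ansible install
    import ansible.module_utils.urls as yep1
except ImportError:
    yep1 = None

try:
    # optional import of a deliberately missing (placeholder) module_util
    import ansible.module_utils.bogusmu as nope1
except ImportError:
    nope1 = None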
def main():
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "system_fortiguard": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "antispam_cache": {"required": False, "type": "str",
                                   "choices": ["enable", "disable"]},
                "antispam_cache_mpercent": {"required": False, "type": "int"},
                "antispam_cache_ttl": {"required": False, "type": "int"},
                "antispam_expiration": {"required": False, "type": "int"},
                "antispam_force_off": {"required": False, "type": "str",
                                       "choices": ["enable", "disable"]},
                "antispam_license": {"required": False, "type": "int"},
                "antispam_timeout": {"required": False, "type": "int"},
                "auto_join_forticloud": {"required": False, "type": "str",
                                         "choices": ["enable", "disable"]},
                "ddns_server_ip": {"required": False, "type": "str"},
                "ddns_server_port": {"required": False, "type": "int"},
                "load_balance_servers": {"required": False, "type": "int"},
                "outbreak_prevention_cache": {"required": False, "type": "str",
                                              "choices": ["enable", "disable"]},
                "outbreak_prevention_cache_mpercent": {"required": False, "type": "int"},
                "outbreak_prevention_cache_ttl": {"required": False, "type": "int"},
                "outbreak_prevention_expiration": {"required": False, "type": "int"},
                "outbreak_prevention_force_off": {"required": False, "type": "str",
                                                  "choices": ["enable", "disable"]},
                "outbreak_prevention_license": {"required": False, "type": "int"},
                "outbreak_prevention_timeout": {"required": False, "type": "int"},
                "port": {"required": False, "type": "str",
                         "choices": ["53", "8888", "80"]},
                "sdns_server_ip": {"required": False, "type": "str"},
                "sdns_server_port": {"required": False, "type": "int"},
                "service_account_id": {"required": False, "type": "str"},
                "source_ip": {"required": False, "type": "str"},
                "source_ip6": {"required": False, "type": "str"},
                "update_server_location": {"required": False, "type": "str",
                                           "choices": ["usa", "any"]},
                "webfilter_cache": {"required": False, "type": "str",
                                    "choices": ["enable", "disable"]},
                "webfilter_cache_ttl": {"required": False, "type": "int"},
                "webfilter_expiration": {"required": False, "type": "int"},
                "webfilter_force_off": {"required": False, "type": "str",
                                        "choices": ["enable", "disable"]},
                "webfilter_license": {"required": False, "type": "int"},
                "webfilter_timeout": {"required": False, "type": "int"}

            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None

    if not legacy_mode:
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)

            is_error, has_changed, result = fortios_system(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")

        fos = FortiOSAPI()

        login(module.params, fos)
        is_error, has_changed, result = fortios_system(module.params, fos)
        fos.logout()

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
Exemple #56
0
class NetstreamExport(object):
    """Manage NetStream export"""
    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.__init_module__()

        # NetStream export configuration parameters
        self.type = self.module.params['type']
        self.source_ip = self.module.params['source_ip']
        self.host_ip = self.module.params['host_ip']
        self.host_port = self.module.params['host_port']
        self.host_vpn = self.module.params['host_vpn']
        self.version = self.module.params['version']
        self.as_option = self.module.params['as_option']
        self.bgp_netxhop = self.module.params['bgp_nexthop']
        self.state = self.module.params['state']

        self.commands = list()
        self.config = None
        self.exist_conf = dict()

        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

    def __init_module__(self):
        """init module"""

        self.module = AnsibleModule(argument_spec=self.spec,
                                    supports_check_mode=True)

    def cli_load_config(self, commands):
        """load config by cli"""

        if not self.module.check_mode:
            load_config(self.module, commands)

    def get_netstream_config(self):
        """get current netstream configuration"""

        cmd = "display current-configuration | include ^netstream export"
        rc, out, err = exec_command(self.module, cmd)
        if rc != 0:
            self.module.fail_json(msg=err)
        config = str(out).strip()
        return config

    def get_existing(self):
        """get existing config"""

        self.existing = dict(type=self.type,
                             source_ip=self.exist_conf['source_ip'],
                             host_ip=self.exist_conf['host_ip'],
                             host_port=self.exist_conf['host_port'],
                             host_vpn=self.exist_conf['host_vpn'],
                             version=self.exist_conf['version'],
                             as_option=self.exist_conf['as_option'],
                             bgp_nexthop=self.exist_conf['bgp_netxhop'])

    def get_proposed(self):
        """get proposed config"""

        self.proposed = dict(type=self.type,
                             source_ip=self.source_ip,
                             host_ip=self.host_ip,
                             host_port=self.host_port,
                             host_vpn=self.host_vpn,
                             version=self.version,
                             as_option=self.as_option,
                             bgp_nexthop=self.bgp_netxhop,
                             state=self.state)

    def get_end_state(self):
        """get end config"""
        self.get_config_data()
        self.end_state = dict(type=self.type,
                              source_ip=self.exist_conf['source_ip'],
                              host_ip=self.exist_conf['host_ip'],
                              host_port=self.exist_conf['host_port'],
                              host_vpn=self.exist_conf['host_vpn'],
                              version=self.exist_conf['version'],
                              as_option=self.exist_conf['as_option'],
                              bgp_nexthop=self.exist_conf['bgp_netxhop'])

    def show_result(self):
        """show result"""

        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)

    def cli_add_command(self, command, undo=False):
        """add command to self.update_cmd and self.commands"""

        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command

        self.commands.append(cmd)  # set to device
        if command.lower() not in ["quit", "return"]:
            if cmd not in self.updates_cmd:
                self.updates_cmd.append(cmd)  # show updates result

    def config_nets_export_src_addr(self):
        """Configures the source address for the exported packets"""

        if is_ipv4_addr(self.source_ip):
            if self.type == 'ip':
                cmd = "netstream export ip source %s" % self.source_ip
            else:
                cmd = "netstream export vxlan inner-ip source %s" % self.source_ip
        else:
            if self.type == 'ip':
                cmd = "netstream export ip source ipv6 %s" % self.source_ip
            else:
                cmd = "netstream export vxlan inner-ip source ipv6 %s" % self.source_ip

        if is_config_exist(self.config, cmd):
            self.exist_conf['source_ip'] = self.source_ip
            if self.state == 'present':
                return
            else:
                undo = True
        else:
            if self.state == 'absent':
                return
            else:
                undo = False

        self.cli_add_command(cmd, undo)

    def config_nets_export_host_addr(self):
        """Configures the destination IP address and destination UDP port number"""

        if is_ipv4_addr(self.host_ip):
            if self.type == 'ip':
                cmd = 'netstream export ip host %s %s' % (self.host_ip,
                                                          self.host_port)
            else:
                cmd = 'netstream export vxlan inner-ip host %s %s' % (
                    self.host_ip, self.host_port)
        else:
            if self.type == 'ip':
                cmd = 'netstream export ip host ipv6 %s %s' % (self.host_ip,
                                                               self.host_port)
            else:
                cmd = 'netstream export vxlan inner-ip host ipv6 %s %s' % (
                    self.host_ip, self.host_port)

        if self.host_vpn:
            cmd += " vpn-instance %s" % self.host_vpn

        if is_config_exist(self.config, cmd):
            self.exist_conf['host_ip'] = self.host_ip
            self.exist_conf['host_port'] = self.host_port
            if self.host_vpn:
                self.exist_conf['host_vpn'] = self.host_vpn

            if self.state == 'present':
                return
            else:
                undo = True
        else:
            if self.state == 'absent':
                return
            else:
                undo = False

        self.cli_add_command(cmd, undo)

    def config_nets_export_vxlan_ver(self):
        """Configures the version for the exported packets carrying VXLAN flexible flow statistics"""

        cmd = 'netstream export vxlan inner-ip version 9'

        if is_config_exist(self.config, cmd):
            self.exist_conf['version'] = self.version

            if self.state == 'present':
                return
            else:
                undo = True
        else:
            if self.state == 'absent':
                return
            else:
                undo = False

        self.cli_add_command(cmd, undo)

    def config_nets_export_ip_ver(self):
        """Configures the version number of the exported packets carrying IPv4 flow statistics"""

        cmd = 'netstream export ip version %s' % self.version
        if self.version == '5':
            if self.as_option == 'origin':
                cmd += ' origin-as'
            elif self.as_option == 'peer':
                cmd += ' peer-as'
        else:
            if self.as_option == 'origin':
                cmd += ' origin-as'
            elif self.as_option == 'peer':
                cmd += ' peer-as'

            if self.bgp_netxhop == 'enable':
                cmd += ' bgp-nexthop'

        if cmd == 'netstream export ip version 5':
            cmd_tmp = "netstream export ip version"
            if cmd_tmp in self.config:
                if self.state == 'present':
                    self.cli_add_command(cmd, False)
            else:
                self.exist_conf['version'] = self.version
            return

        if is_config_exist(self.config, cmd):
            self.exist_conf['version'] = self.version
            self.exist_conf['as_option'] = self.as_option
            self.exist_conf['bgp_netxhop'] = self.bgp_netxhop

            if self.state == 'present':
                return
            else:
                undo = True
        else:
            if self.state == 'absent':
                return
            else:
                undo = False

        self.cli_add_command(cmd, undo)

    def config_netstream_export(self):
        """configure netstream export"""

        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

    def check_params(self):
        """Check all input params"""

        if not self.type:
            self.module.fail_json(
                msg='Error: The value of type cannot be empty.')

        if self.host_port:
            if not self.host_port.isdigit():
                self.module.fail_json(msg='Error: Host port is invalid.')
            if int(self.host_port) < 1 or int(self.host_port) > 65535:
                self.module.fail_json(
                    msg='Error: Host port is not in the range from 1 to 65535.'
                )

        if self.host_vpn:
            if self.host_vpn == '_public_':
                self.module.fail_json(
                    msg='Error: The host vpn name _public_ is reserved.')
            if len(self.host_vpn) < 1 or len(self.host_vpn) > 31:
                self.module.fail_json(
                    msg='Error: The host vpn name length is not in the range from 1 to 31.')

        if self.type == 'vxlan' and self.version == '5':
            self.module.fail_json(
                msg="Error: When type is vxlan, version must be 9.")

        if self.type == 'ip' and self.version == '5' and self.bgp_netxhop == 'enable':
            self.module.fail_json(
                msg="Error: When type=ip and version=5, bgp_nexthop is not supported.")

        if (self.host_ip and not self.host_port) or (self.host_port and not self.host_ip):
            self.module.fail_json(
                msg="Error: host_ip and host_port must be specified together.")

    def get_config_data(self):
        """get configuration commands and current configuration"""

        self.exist_conf['type'] = self.type
        self.exist_conf['source_ip'] = None
        self.exist_conf['host_ip'] = None
        self.exist_conf['host_port'] = None
        self.exist_conf['host_vpn'] = None
        self.exist_conf['version'] = None
        self.exist_conf['as_option'] = None
        self.exist_conf['bgp_netxhop'] = 'disable'

        self.config = self.get_netstream_config()

        if self.type and self.source_ip:
            self.config_nets_export_src_addr()

        if self.type and self.host_ip and self.host_port:
            self.config_nets_export_host_addr()

        if self.type == 'vxlan' and self.version == '9':
            self.config_nets_export_vxlan_ver()

        if self.type == 'ip' and self.version:
            self.config_nets_export_ip_ver()

    def work(self):
        """execute task"""

        self.check_params()
        self.get_proposed()
        self.get_config_data()
        self.get_existing()

        self.config_netstream_export()

        self.get_end_state()
        self.show_result()
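The class above still needs a small driver; a main() along these lines is assumed (the argument spec mirrors the parameters consumed in __init__, and ce_argument_spec stands in for the CloudEngine common arguments):

def main():
    # Argument spec mirroring the parameters read by NetstreamExport.
    argument_spec = dict(
        type=dict(required=True, type='str', choices=['ip', 'vxlan']),
        source_ip=dict(required=False, type='str'),
        host_ip=dict(required=False, type='str'),
        host_port=dict(required=False, type='str'),
        host_vpn=dict(required=False, type='str'),
        version=dict(required=False, type='str', choices=['5', '9']),
        as_option=dict(required=False, type='str', choices=['origin', 'peer']),
        bgp_nexthop=dict(required=False, type='str',
                         choices=['enable', 'disable'], default='disable'),
        state=dict(required=False, type='str',
                   choices=['present', 'absent'], default='present'),
    )
    argument_spec.update(ce_argument_spec)  # assumed CloudEngine common args

    netstream_export = NetstreamExport(argument_spec)
    netstream_export.work()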
def main():
    """entry point for module execution
    """
    argument_spec = dict(
        # { command: <str>, output: <str>, prompt: <str>, response: <str> }
        commands=dict(type="list", required=True, elements="raw"),
        wait_for=dict(type="list", aliases=["waitfor"], elements="str"),
        match=dict(default="all", choices=["any", "all"]),
        retries=dict(default=10, type="int"),
        interval=dict(default=1, type="int"),
    )

    argument_spec.update(nxos_argument_spec)

    module = AnsibleModule(
        argument_spec=argument_spec, supports_check_mode=True
    )

    warnings = list()
    result = {"changed": False, "warnings": warnings}
    commands = parse_commands(module, warnings)
    wait_for = module.params["wait_for"] or list()

    try:
        conditionals = [Conditional(c) for c in wait_for]
    except AttributeError as exc:
        module.fail_json(msg=to_text(exc))

    retries = module.params["retries"]
    interval = module.params["interval"]
    match = module.params["match"]

    while retries > 0:
        responses = run_commands(module, commands)

        for item in list(conditionals):
            try:
                if item(responses):
                    if match == "any":
                        conditionals = list()
                        break
                    conditionals.remove(item)
            except FailedConditionalError as exc:
                module.fail_json(msg=to_text(exc))

        if not conditionals:
            break

        time.sleep(interval)
        retries -= 1

    if conditionals:
        failed_conditions = [item.raw for item in conditionals]
        msg = "One or more conditional statements have not been satisfied"
        module.fail_json(msg=msg, failed_conditions=failed_conditions)

    result.update(
        {"stdout": responses, "stdout_lines": list(to_lines(responses))}
    )

    module.exit_json(**result)
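to_lines() used for stdout_lines is not shown; it is assumed to be the usual network-common helper that splits each string response into a list of lines (simplified sketch):

def to_lines(stdout):
    # Yield each command response as a list of lines so that
    # stdout_lines mirrors stdout entry for entry.
    for item in stdout:
        if isinstance(item, str):
            item = item.split('\n')
        yield item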
Exemple #58
0
def main():
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool'),
        network=dict(default=None),
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
        project=dict(default=None)
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    project = module.params['project']

    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')

    sdk, cloud = openstack_cloud_from_module(module)
    try:
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None

        router = cloud.get_router(name, filters=filters)
        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)

        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud)
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net, subnet_internal_ids, internal_portids)
            )

        if state == 'present':
            changed = False

            if not router:
                kwargs = _build_kwargs(cloud, module, router, net)
                if project_id:
                    kwargs['project_id'] = project_id
                router = cloud.create_router(**kwargs)
                for int_s_id in subnet_internal_ids:
                    cloud.add_router_interface(router, subnet_id=int_s_id)
                changed = True
                # add interface by port id as well
                for int_p_id in internal_portids:
                    cloud.add_router_interface(router, port_id=int_p_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net, subnet_internal_ids, internal_portids):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    updated_router = cloud.update_router(**kwargs)

                    # Protect against update_router() not actually
                    # updating the router.
                    if not updated_router:
                        changed = False

                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    if internal_portids or subnet_internal_ids:
                        router = updated_router
                        ports = _router_internal_interfaces(cloud, router)
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                    if internal_portids:
                        external_ids, subnet_internal_ids, internal_portids = _validate_subnets(module, cloud)
                        for int_p_id in internal_portids:
                            cloud.add_router_interface(router, port_id=int_p_id)
                        changed = True
                    if subnet_internal_ids:
                        for s_id in subnet_internal_ids:
                            cloud.add_router_interface(router, subnet_id=s_id)
                        changed = True

            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])

        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = _router_internal_interfaces(cloud, router)
                router_id = router['id']
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(router_id)
                module.exit_json(changed=True)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
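_router_internal_interfaces() is one of several helpers referenced above but not shown. A sketch, assuming the cloud object exposes list_router_interfaces() with an interface-type filter:

def _router_internal_interfaces(cloud, router):
    # Yield only the router ports attached to internal (tenant) networks,
    # i.e. everything except the external gateway port.
    for port in cloud.list_router_interfaces(router, 'internal'):
        yield port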
Exemple #59
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            binary_path=dict(type='path'),
            state=dict(type='str',
                       default='present',
                       choices=['present', 'absent']),
            plugin_path=dict(type='str'),
            plugin_name=dict(type='str'),
            # Helm options
            context=dict(type='str',
                         aliases=['kube_context'],
                         fallback=(env_fallback, ['K8S_AUTH_CONTEXT'])),
            kubeconfig=dict(type='path',
                            aliases=['kubeconfig_path'],
                            fallback=(env_fallback, ['K8S_AUTH_KUBECONFIG'])),

            # Generic auth key
            host=dict(type='str', fallback=(env_fallback, ['K8S_AUTH_HOST'])),
            ca_cert=dict(type='path',
                         aliases=['ssl_ca_cert'],
                         fallback=(env_fallback, ['K8S_AUTH_SSL_CA_CERT'])),
            validate_certs=dict(type='bool',
                                default=True,
                                aliases=['verify_ssl'],
                                fallback=(env_fallback,
                                          ['K8S_AUTH_VERIFY_SSL'])),
            api_key=dict(type='str',
                         no_log=True,
                         fallback=(env_fallback, ['K8S_AUTH_API_KEY']))),
        supports_check_mode=True,
        required_if=[
            ("state", "present", ("plugin_path", )),
            ("state", "absent", ("plugin_name", )),
        ],
        mutually_exclusive=[('plugin_name', 'plugin_path'),
                            ("context", "ca_cert"),
                            ("context", "validate_certs"),
                            ("kubeconfig", "ca_cert"),
                            ("kubeconfig", "validate_certs")],
    )

    bin_path = module.params.get('binary_path')
    state = module.params.get('state')

    if bin_path is not None:
        helm_cmd_common = bin_path
    else:
        helm_cmd_common = 'helm'

    helm_cmd_common = module.get_bin_path(helm_cmd_common, required=True)

    helm_cmd_common += " plugin"

    if state == 'present':
        helm_cmd_common += " install %s" % module.params.get('plugin_path')
        if not module.check_mode:
            rc, out, err = run_helm(module,
                                    helm_cmd_common,
                                    fails_on_error=False)
        else:
            rc, out, err = (0, '', '')

        if rc == 1 and 'plugin already exists' in err:
            module.exit_json(failed=False,
                             changed=False,
                             msg="Plugin already exists",
                             command=helm_cmd_common,
                             stdout=out,
                             stderr=err,
                             rc=rc)
        elif rc == 0:
            module.exit_json(
                failed=False,
                changed=True,
                msg="Plugin installed successfully",
                command=helm_cmd_common,
                stdout=out,
                stderr=err,
                rc=rc,
            )
        else:
            module.fail_json(
                msg="Failure when executing Helm command.",
                command=helm_cmd_common,
                stdout=out,
                stderr=err,
                rc=rc,
            )
    elif state == 'absent':
        plugin_name = module.params.get('plugin_name')
        rc, output, err = get_helm_plugin_list(module,
                                               helm_bin=helm_cmd_common)
        out = parse_helm_plugin_list(module, output=output.splitlines())

        if not out:
            module.exit_json(failed=False,
                             changed=False,
                             msg="Plugin not found or is already uninstalled",
                             command=helm_cmd_common + " list",
                             stdout=output,
                             stderr=err,
                             rc=rc)

        found = False
        for line in out:
            if line[0] == plugin_name:
                found = True
                break
        if not found:
            module.exit_json(failed=False,
                             changed=False,
                             msg="Plugin not found or is already uninstalled",
                             command=helm_cmd_common + " list",
                             stdout=output,
                             stderr=err,
                             rc=rc)

        helm_uninstall_cmd = "%s uninstall %s" % (helm_cmd_common, plugin_name)
        if not module.check_mode:
            rc, out, err = run_helm(module,
                                    helm_uninstall_cmd,
                                    fails_on_error=False)
        else:
            rc, out, err = (0, '', '')

        if rc == 0:
            module.exit_json(changed=True,
                             msg="Plugin uninstalled successfully",
                             command=helm_uninstall_cmd,
                             stdout=out,
                             stderr=err,
                             rc=rc)
        module.fail_json(
            msg="Failed to get Helm plugin uninstall",
            command=helm_uninstall_cmd,
            stdout=out,
            stderr=err,
            rc=rc,
        )
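run_helm(), get_helm_plugin_list() and parse_helm_plugin_list() are referenced above but not shown. A sketch of run_helm(), assuming it is a thin wrapper over module.run_command():

def run_helm(module, command, fails_on_error=True):
    # Execute a helm CLI command string; optionally fail the module on a
    # non-zero return code instead of letting the caller handle it.
    rc, out, err = module.run_command(command)
    if fails_on_error and rc != 0:
        module.fail_json(
            msg="Failure when executing Helm command.",
            command=command, rc=rc, stdout=out, stderr=err)
    return rc, out, err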
Exemple #60
0
def main():
    """ main entry point for module execution
    """
    argument_spec = dict(
        src=dict(type='path'),
        replace_src=dict(),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact',
                                            'none']),
        replace=dict(default='line', choices=['line', 'block', 'config']),
        running_config=dict(aliases=['config']),
        intended_config=dict(),
        defaults=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        save_when=dict(choices=['always', 'never', 'modified', 'changed'],
                       default='never'),
        diff_against=dict(choices=['running', 'startup', 'intended']),
        diff_ignore_lines=dict(type='list'),

        # save is deprecated as of Ansible 2.4, use save_when instead
        save=dict(default=False, type='bool', removed_in_version='2.8'),

        # force argument deprecated in Ansible 2.2
        force=dict(default=False, type='bool', removed_in_version='2.6'))

    argument_spec.update(nxos_argument_spec)

    mutually_exclusive = [('lines', 'src', 'replace_src'), ('parents', 'src'),
                          ('save', 'save_when')]

    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines']),
                   ('replace', 'config', ['replace_src']),
                   ('diff_against', 'intended', ['intended_config'])]

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)

    warnings = list()
    nxos_check_args(module, warnings)

    result = {'changed': False, 'warnings': warnings}

    config = None

    try:
        info = get_capabilities(module)
        api = info.get('network_api', 'nxapi')
        device_info = info.get('device_info', {})
        os_platform = device_info.get('network_os_platform', '')
    except ConnectionError:
        api = ''
        os_platform = ''

    if api == 'cliconf' and module.params['replace'] == 'config':
        if '9K' not in os_platform:
            module.fail_json(
                msg='replace: config is supported only on Nexus 9K series switches')

    if module.params['replace_src']:
        if module.params['replace'] != 'config':
            module.fail_json(
                msg='replace: config is required with replace_src')

    if module.params['backup'] or (module._diff and
                                   module.params['diff_against'] == 'running'):
        contents = get_config(module)
        config = NetworkConfig(indent=2, contents=contents)
        if module.params['backup']:
            result['__backup__'] = contents

    if any((module.params['src'], module.params['lines'],
            module.params['replace_src'])):
        match = module.params['match']
        replace = module.params['replace']

        candidate = get_candidate(module)

        if match != 'none' and replace != 'config':
            config = get_running_config(module, config)
            path = module.params['parents']
            configobjs = candidate.difference(config,
                                              match=match,
                                              replace=replace,
                                              path=path)
        else:
            configobjs = candidate.items

        if configobjs:
            commands = dumps(configobjs, 'commands').split('\n')

            if module.params['before']:
                commands[:0] = module.params['before']

            if module.params['after']:
                commands.extend(module.params['after'])

            result['commands'] = commands
            result['updates'] = commands

            if not module.check_mode:
                load_config(module, commands)

            result['changed'] = True

    running_config = module.params['running_config']
    startup_config = None

    diff_ignore_lines = module.params['diff_ignore_lines']

    if module.params['save_when'] == 'always' or module.params['save']:
        save_config(module, result)
    elif module.params['save_when'] == 'modified':
        output = execute_show_commands(
            module, ['show running-config', 'show startup-config'])

        running_config = NetworkConfig(indent=1,
                                       contents=output[0],
                                       ignore_lines=diff_ignore_lines)
        startup_config = NetworkConfig(indent=1,
                                       contents=output[1],
                                       ignore_lines=diff_ignore_lines)

        if running_config.sha1 != startup_config.sha1:
            save_config(module, result)
    elif module.params['save_when'] == 'changed' and result['changed']:
        save_config(module, result)

    if module._diff:
        if not running_config:
            output = execute_show_commands(module, 'show running-config')
            contents = output[0]
        else:
            contents = running_config

        # recreate the object in order to process diff_ignore_lines
        running_config = NetworkConfig(indent=1,
                                       contents=contents,
                                       ignore_lines=diff_ignore_lines)

        if module.params['diff_against'] == 'running':
            if module.check_mode:
                module.warn(
                    "unable to perform diff against running-config due to check mode"
                )
                contents = None
            else:
                contents = config.config_text

        elif module.params['diff_against'] == 'startup':
            if not startup_config:
                output = execute_show_commands(module, 'show startup-config')
                contents = output[0]
            else:
                contents = startup_config.config_text

        elif module.params['diff_against'] == 'intended':
            contents = module.params['intended_config']

        if contents is not None:
            base_config = NetworkConfig(indent=1,
                                        contents=contents,
                                        ignore_lines=diff_ignore_lines)

            if running_config.sha1 != base_config.sha1:
                if module.params['diff_against'] == 'intended':
                    before = running_config
                    after = base_config
                elif module.params['diff_against'] in ('startup', 'running'):
                    before = base_config
                    after = running_config

                result.update({
                    'changed': True,
                    'diff': {
                        'before': str(before),
                        'after': str(after)
                    }
                })

    module.exit_json(**result)
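execute_show_commands() and save_config() are referenced above but not shown. Sketches of what they are assumed to do, assuming the run_commands() and to_list() helpers from the nxos/network-common module_utils are available:

def execute_show_commands(module, commands, output='text'):
    # Wrap each show command with the requested output format and run them.
    cmds = [{'command': command, 'output': output} for command in to_list(commands)]
    return run_commands(module, cmds)


def save_config(module, result):
    # Copy running-config to startup-config unless running in check mode.
    result['changed'] = True
    if not module.check_mode:
        run_commands(module, [{'command': 'copy running-config startup-config',
                               'output': 'text'}])
    else:
        module.warn('Skipping command `copy running-config startup-config` '
                    'due to check_mode. Configuration not copied to '
                    'non-volatile storage.')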