Example #1
def main():
    module = AnsibleModule(argument_spec={
        'name': dict(required=True, type='str'),
        'compose': dict(required=False, type='list', default=[]),
        'prune': dict(default=False, type='bool'),
        'with_registry_auth': dict(default=False, type='bool'),
        'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
        'state': dict(default='present', choices=['present', 'absent']),
        'absent_retries': dict(type='int', default=0),
        'absent_retries_interval': dict(type='int', default=1)
    }, supports_check_mode=False)

    if not HAS_JSONDIFF:
        return module.fail_json(
            msg="jsondiff is not installed, try 'pip install jsondiff'")

    if not HAS_YAML:
        return module.fail_json(
            msg="yaml is not installed, try 'pip install pyyaml'")

    state = module.params['state']
    compose = module.params['compose']
    name = module.params['name']
    absent_retries = module.params['absent_retries']
    absent_retries_interval = module.params['absent_retries_interval']

    if state == 'present':
        if not compose:
            module.fail_json(msg=("compose parameter must be a list "
                                  "containing at least one element"))

        compose_files = []
        for compose_def in compose:
            if isinstance(compose_def, dict):
                compose_file_fd, compose_file = tempfile.mkstemp()
                module.add_cleanup_file(compose_file)
                with os.fdopen(compose_file_fd, 'w') as stack_file:
                    compose_files.append(compose_file)
                    stack_file.write(yaml_dump(compose_def))
            elif isinstance(compose_def, string_types):
                compose_files.append(compose_def)
            else:
                module.fail_json(msg="compose element '%s' must be a " +
                                 "string or a dictionary" % compose_def)

        before_stack_services = docker_stack_inspect(module, name)

        rc, out, err = docker_stack_deploy(module, name, compose_files)

        if rc != 0:
            module.fail_json(msg="'docker stack deploy' command failed",
                             out=out,
                             rc=rc,
                             err=err)

        after_stack_services = docker_stack_inspect(module, name)

        before_after_differences = json_diff(before_stack_services,
                                             after_stack_services)
        for k in list(before_after_differences.keys()):  # list() lets us pop while iterating
            if isinstance(before_after_differences[k], dict):
                before_after_differences[k].pop('UpdatedAt', None)
                before_after_differences[k].pop('Version', None)
                if not list(before_after_differences[k].keys()):
                    before_after_differences.pop(k)

        if not before_after_differences:
            module.exit_json(changed=False)
        else:
            module.exit_json(changed=True,
                             docker_stack_spec_diff=json_diff(
                                 before_stack_services,
                                 after_stack_services,
                                 dump=True))

    else:
        if docker_stack_services(module, name):
            rc, out, err = docker_stack_rm(module, name, absent_retries,
                                           absent_retries_interval)
            if rc != 0:
                module.fail_json(msg="'docker stack down' command failed",
                                 out=out,
                                 rc=rc,
                                 err=err)
            else:
                module.exit_json(changed=True, msg=out, err=err, rc=rc)
        module.exit_json(changed=False)
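
A note on the change-detection step above: the module compares docker_stack_inspect() output taken before and after the deploy using jsondiff, then strips the volatile 'UpdatedAt' and 'Version' fields so that bookkeeping churn alone is not reported as a change. A minimal standalone sketch of that idea, assuming the jsondiff package is installed (the before/after data here is illustrative):

from jsondiff import diff

before = {'web': {'Version': 3, 'UpdatedAt': '2020-01-01', 'Replicas': 2}}
after = {'web': {'Version': 4, 'UpdatedAt': '2020-01-02', 'Replicas': 3}}

differences = diff(before, after)
# Drop volatile bookkeeping fields so they alone do not count as a change.
for key in list(differences.keys()):  # list() lets us pop while iterating
    if isinstance(differences[key], dict):
        differences[key].pop('UpdatedAt', None)
        differences[key].pop('Version', None)
        if not differences[key]:
            differences.pop(key)

print('changed' if differences else 'unchanged')  # -> changed (Replicas differs)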
Example #2
def main():
    module = AnsibleModule(argument_spec=dict(
        dest=dict(type='path'),
        repo=dict(required=True, aliases=['name']),
        version=dict(default='HEAD'),
        remote=dict(default='origin'),
        refspec=dict(default=None),
        reference=dict(default=None),
        force=dict(default='no', type='bool'),
        depth=dict(default=None, type='int'),
        clone=dict(default='yes', type='bool'),
        update=dict(default='yes', type='bool'),
        verify_commit=dict(default='no', type='bool'),
        accept_hostkey=dict(default='no', type='bool'),
        key_file=dict(default=None, type='path', required=False),
        ssh_opts=dict(default=None, required=False),
        executable=dict(default=None, type='path'),
        bare=dict(default='no', type='bool'),
        recursive=dict(default='yes', type='bool'),
        track_submodules=dict(default='no', type='bool'),
        umask=dict(default=None, type='raw'),
    ), supports_check_mode=True)

    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']
    umask = module.params['umask']

    result = dict(changed=False, warnings=list())

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(
                msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except ValueError:
            module.fail_json(msg="umask must be an octal integer",
                             details=str(sys.exc_info()[1]))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if repo.startswith('/'):
        repo = 'file://' + repo

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C',
                                             LC_ALL='C',
                                             LC_MESSAGES='C',
                                             LC_CTYPE='C')

    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(
            msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = None
    if key_file or ssh_opts:
        ssh_wrapper = write_ssh_wrapper()
        set_git_ssh(ssh_wrapper, key_file, ssh_opts)
        module.add_cleanup_file(path=ssh_wrapper)

    # add the git repo's hostkey
    if module.params['ssh_opts'] is not None:
        if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
            add_git_host_key(module,
                             repo,
                             accept_hostkey=module.params['accept_hostkey'])
    else:
        add_git_host_key(module,
                         repo,
                         accept_hostkey=module.params['accept_hostkey'])
    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        result['warnings'].append(
            "Your git version is too old to fully support the depth argument. Falling back to full checkouts."
        )
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)
    local_mods = False
    need_fetch = True
    if (dest and not os.path.exists(gitconfig)) or (not dest
                                                    and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version,
                                          repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth,
                                bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare,
              reference, refspec, verify_commit)
        need_fetch = False
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(after=result['before'])
        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(
                    msg="Local modifications exist in repository (force=no).",
                    **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)
                result.update(changed=True, msg='Local modifications exist.')

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = (remote_url and remote_url != repo and
                                  unfrackgitpath(remote_url) != unfrackgitpath(repo))
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest,
                                                remote)
        result.update(remote_url_changed=remote_url_changed)

        if module.check_mode:
            remote_head = get_remote_head(git_path, module, dest, version,
                                          remote, bare)
            result.update(changed=(result['before'] != remote_head
                                   or remote_url_changed),
                          after=remote_head)
            # FIXME: This diff should fail since the new remote_head is not fetched yet?!
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth,
                                bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        else:
            fetch(git_path, module, repo, dest, version, remote, depth, bare,
                  refspec, git_version_used)

        result['after'] = get_version(module, git_path, dest)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit,
                       depth)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote,
                                              track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

            if module.check_mode:
                result.update(changed=True, after=remote_head)
                module.exit_json(**result)

            # Switch to version specified
            submodule_update(git_path,
                             module,
                             dest,
                             track_submodules,
                             force=force)

    # determine if we changed anything
    result['after'] = get_version(module, git_path, dest)

    if (result['before'] != result['after'] or local_mods
            or submodules_updated or remote_url_changed):
        result.update(changed=True)
        if module._diff:
            diff = get_diff(module, git_path, dest, repo, remote, depth, bare,
                            result['before'], result['after'])
            if diff:
                result['diff'] = diff

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(**result)
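
A note on the umask handling above: the parameter is declared type='raw' and validated by hand because a umask such as 022 must arrive as a quoted string; unquoted, YAML would parse it as an integer and the leading zero's octal intent would be lost. A short standalone sketch of the same pattern (the value is illustrative):

import os

umask_param = '022'  # what a playbook would pass, quoted
try:
    umask = int(umask_param, 8)  # parse as base-8: '022' -> 18 decimal
except ValueError:
    raise SystemExit('umask must be an octal integer')

old = os.umask(umask)  # apply it; os.umask() returns the previous mask
print('umask set to %03o (was %03o)' % (umask, old))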
Example #3
def main():
    module = AnsibleModule(
        argument_spec=dict(
            account_key_src=dict(type='path', aliases=['account_key']),
            account_key_content=dict(type='str', no_log=True),
            acme_directory=dict(
                required=False,
                default='https://acme-staging.api.letsencrypt.org/directory',
                type='str'),
            acme_version=dict(required=False,
                              default=1,
                              choices=[1, 2],
                              type='int'),
            validate_certs=dict(required=False, default=True, type='bool'),
            private_key_src=dict(type='path'),
            private_key_content=dict(type='str', no_log=True),
            certificate=dict(required=True, type='path'),
            revoke_reason=dict(required=False, type='int'),
        ),
        required_one_of=([
            'account_key_src', 'account_key_content', 'private_key_src',
            'private_key_content'
        ], ),
        mutually_exclusive=([
            'account_key_src', 'account_key_content', 'private_key_src',
            'private_key_content'
        ], ),
        supports_check_mode=False,
    )

    if not module.params.get('validate_certs'):
        module.warn(
            'Disabling certificate validation for communications with ACME endpoint. '
            'This should only be done for testing against a local ACME server for '
            'development purposes, but *never* for production purposes.')

    try:
        account = ACMEAccount(module)
        # Load certificate
        certificate_lines = []
        try:
            with open(module.params.get('certificate'), "rt") as f:
                header_line_count = 0
                for line in f:
                    if line.startswith('-----'):
                        header_line_count += 1
                        if header_line_count == 2:
                            # If certificate file contains other certs appended
                            # (like intermediate certificates), ignore these.
                            break
                        continue
                    certificate_lines.append(line.strip())
        except Exception as err:
            raise ModuleFailException("cannot load certificate file: %s" %
                                      to_native(err),
                                      exception=traceback.format_exc())
        certificate = nopad_b64(base64.b64decode(''.join(certificate_lines)))
        # Construct payload
        payload = {'certificate': certificate}
        if module.params.get('revoke_reason') is not None:
            payload['reason'] = module.params.get('revoke_reason')
        # Determine endpoint
        if module.params.get('acme_version') == 1:
            endpoint = account.directory['revoke-cert']
            payload['resource'] = 'revoke-cert'
        else:
            endpoint = account.directory['revokeCert']
        # Get hold of private key (if available) and make sure it comes from disk
        private_key = module.params.get('private_key_src')
        if module.params.get('private_key_content') is not None:
            fd, tmpsrc = tempfile.mkstemp()
            module.add_cleanup_file(tmpsrc)  # Ansible will delete the file on exit
            f = os.fdopen(fd, 'wb')
            try:
                f.write(
                    module.params.get('private_key_content').encode('utf-8'))
                private_key = tmpsrc
            except Exception as err:
                try:
                    f.close()
                except Exception:
                    pass
                raise ModuleFailException(
                    "failed to create temporary content file: %s" %
                    to_native(err),
                    exception=traceback.format_exc())
            f.close()
        # Revoke certificate
        if private_key:
            # Step 1: load and parse private key
            error, private_key_data = account.parse_account_key(private_key)
            if error:
                raise ModuleFailException(
                    "error while parsing private key: %s" % error)
            # Step 2: sign revocation request with private key
            jws_header = {
                "alg": private_key_data['alg'],
                "jwk": private_key_data['jwk'],
            }
            result, info = account.send_signed_request(
                endpoint,
                payload,
                key=private_key,
                key_data=private_key_data,
                jws_header=jws_header)
        else:
            # Step 1: get hold of account URI
            changed = account.init_account(
                [],
                allow_creation=False,
                update_contact=False,
            )
            if changed:
                raise AssertionError('Unwanted account change')
            # Step 2: sign revocation request with account key
            result, info = account.send_signed_request(endpoint, payload)
        if info['status'] != 200:
            if module.params.get('acme_version') == 1:
                error_type = 'urn:acme:error:malformed'
            else:
                error_type = 'urn:ietf:params:acme:error:malformed'
            if result.get('type') == error_type and result.get(
                    'detail') == 'Certificate already revoked':
                # Fallback: boulder returns this in case the certificate was already revoked.
                module.exit_json(changed=False)
            raise ModuleFailException(
                'Error revoking certificate: {0} {1}'.format(
                    info['status'], result))
        module.exit_json(changed=True)
    except ModuleFailException as e:
        e.do_fail(module)
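
A note on the certificate-loading loop above: it keeps only the body of the first PEM block (so appended intermediates are ignored), decodes it to DER, and re-encodes it as unpadded base64url, which is what nopad_b64() is expected to produce for the ACME JSON payload here. A standalone sketch of those two steps under the same first-block convention (the inline PEM is illustrative, not a real certificate):

import base64

pem_text = '''-----BEGIN CERTIFICATE-----
aGVsbG8gd29ybGQ=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
aW50ZXJtZWRpYXRl
-----END CERTIFICATE-----
'''

body_lines = []
marker_count = 0
for line in pem_text.splitlines():
    if line.startswith('-----'):
        marker_count += 1
        if marker_count == 2:  # closing marker of the first block
            break
        continue
    body_lines.append(line.strip())

der = base64.b64decode(''.join(body_lines))
cert_b64url = base64.urlsafe_b64encode(der).decode('utf-8').rstrip('=')
print(cert_b64url)  # unpadded base64url of the DER bytes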
Example #4
def main():
    module = AnsibleModule(
        argument_spec = dict(
            dest=dict(type='path'),
            repo=dict(required=True, aliases=['name']),
            version=dict(default='HEAD'),
            remote=dict(default='origin'),
            refspec=dict(default=None),
            reference=dict(default=None),
            force=dict(default='no', type='bool'),
            depth=dict(default=None, type='int'),
            clone=dict(default='yes', type='bool'),
            update=dict(default='yes', type='bool'),
            verify_commit=dict(default='no', type='bool'),
            accept_hostkey=dict(default='no', type='bool'),
            key_file=dict(default=None, type='path', required=False),
            ssh_opts=dict(default=None, required=False),
            executable=dict(default=None, type='path'),
            bare=dict(default='no', type='bool'),
            recursive=dict(default='yes', type='bool'),
            track_submodules=dict(default='no', type='bool'),
            umask=dict(default=None, type='raw'),
        ),
        supports_check_mode=True
    )

    dest      = module.params['dest']
    repo      = module.params['repo']
    version   = module.params['version']
    remote    = module.params['remote']
    refspec   = module.params['refspec']
    force     = module.params['force']
    depth     = module.params['depth']
    update    = module.params['update']
    allow_clone = module.params['clone']
    bare      = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path  = module.params['executable'] or module.get_bin_path('git', True)
    key_file  = module.params['key_file']
    ssh_opts  = module.params['ssh_opts']
    umask  = module.params['umask']

    result = dict( warnings=list() )

    # evaluate and set the umask before doing anything else
    if umask is not None:
        if not isinstance(umask, string_types):
            module.fail_json(msg="umask must be defined as a quoted octal integer")
        try:
            umask = int(umask, 8)
        except ValueError:
            module.fail_json(msg="umask must be an octal integer",
                             details=str(sys.exc_info()[1]))
        os.umask(umask)

    # Certain features such as depth require a file:/// protocol for path based urls
    # so force a protocol here ...
    if repo.startswith('/'):
        repo = 'file://' + repo

    # We screenscrape a huge amount of git commands so use C locale anytime we
    # call run_command()
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(dest)
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = None
    if key_file or ssh_opts:
        ssh_wrapper = write_ssh_wrapper()
        set_git_ssh(ssh_wrapper, key_file, ssh_opts)
        module.add_cleanup_file(path=ssh_wrapper)

    # add the git repo's hostkey
    if module.params['ssh_opts'] is not None:
        if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
            add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    else:
        add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    git_version_used = git_version(git_path, module)

    if depth is not None and git_version_used < LooseVersion('1.9.1'):
        result['warnings'].append("Your git version is too old to fully support the depth argument. Falling back to full checkouts.")
        depth = None

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    result.update(before=None)
    local_mods = False
    need_fetch = True
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            result.update(changed=True, after=remote_head)
            if module._diff:
                diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                if diff:
                    result['diff'] = diff
            module.exit_json(**result)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
        need_fetch = False
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        result['before'] = get_version(module, git_path, dest)
        result.update(changed=False, after=result['before'])
        module.exit_json(**result)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        result['before'] = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(msg="Local modifications exist in repository (force=no).", **result)
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)

        # exit if already at desired sha version
        if module.check_mode:
            remote_url = get_remote_url(git_path, module, dest, remote)
            remote_url_changed = remote_url and remote_url != repo and remote_url != unfrackgitpath(repo)
        else:
            remote_url_changed = set_remote_url(git_path, module, repo, dest, remote)
        if remote_url_changed:
            result.update(remote_url_changed=True)

        if need_fetch:
            if module.check_mode:
                remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
                result.update(changed=(result['before'] != remote_head), after=remote_head)
                # FIXME: This diff should fail since the new remote_head is not fetched yet?!
                if module._diff:
                    diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
                    if diff:
                        result['diff'] = diff
                module.exit_json(**result)
            else:
                fetch(git_path, module, repo, dest, version, remote, depth, bare, refspec, git_version_used)

        result['after'] = get_version(module, git_path, dest)

        if result['before'] == result['after']:
            if local_mods:
                result.update(changed=True, after=result['after'], msg='Local modifications exist')
                # no diff, since the repo didn't change
                module.exit_json(**result)

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit, depth)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
        if submodules_updated:
            result.update(submodules_changed=submodules_updated)

            if module.check_mode:
                result.update(changed=True, after=remote_head)
                module.exit_json(**result)

            # Switch to version specified
            submodule_update(git_path, module, dest, track_submodules, force=force)

    # determine if we changed anything
    result['after'] = get_version(module, git_path, dest)

    result.update(changed=False)
    if result['before'] != result['after'] or local_mods or submodules_updated or remote_url_changed:
        result.update(changed=True)
        if module._diff:
            diff = get_diff(module, git_path, dest, repo, remote, depth, bare, result['before'], result['after'])
            if diff:
                result['diff'] = diff

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(**result)
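
A note on the ssh wrapper used in both git examples: git has no command-line flag for extra ssh options, but it honors the GIT_SSH environment variable, so the module writes a tiny shell script and points GIT_SSH at it. A simplified stand-in for the write_ssh_wrapper()/set_git_ssh() pair (the flag handling is illustrative, not the module's exact implementation):

import os
import stat
import tempfile

def make_ssh_wrapper(key_file=None, ssh_opts=None):
    fd, path = tempfile.mkstemp(prefix='git_ssh_wrapper', suffix='.sh')
    parts = ['exec', 'ssh']
    if ssh_opts:
        parts.append(ssh_opts)
    if key_file:
        parts.extend(['-i', key_file])
    parts.append('"$@"')  # forward git's host and command arguments
    with os.fdopen(fd, 'w') as f:
        f.write('#!/bin/sh\n%s\n' % ' '.join(parts))
    os.chmod(path, os.stat(path).st_mode | stat.S_IEXEC)
    return path

wrapper = make_ssh_wrapper(ssh_opts='-o StrictHostKeyChecking=no')
os.environ['GIT_SSH'] = wrapper  # every git call in this process now uses it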
Example #5
def run_module():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default="present", choices=['present', 'absent']),
            name=dict(required=True),
            resource_class=dict(default="ocf", choices=['ocf', 'systemd', 'stonith', 'master', 'promotable']),
            resource_type=dict(required=False),
            options=dict(default="", required=False),
            force_resource_update=dict(default=False, type='bool', required=False),
            cib_file=dict(required=False),
            child_name=dict(required=False),
            ignored_meta_attributes=dict(required=False, type='list', elements='str', default=[]),
        ),
        supports_check_mode=True
    )

    state = module.params['state']
    resource_name = module.params['name']
    resource_class = module.params['resource_class']
    cib_file = module.params['cib_file']
    if 'child_name' in module.params and module.params['child_name'] is None:
        module.params['child_name'] = resource_name + '-child'
    child_name = module.params['child_name']
    resource_options = module.params['options']
    ignored_meta_attributes = module.params['ignored_meta_attributes']

    if state == 'present' and (not module.params['resource_type']):
        module.fail_json(msg='When creating cluster resource you must specify the resource_type')
    result = {}

    if find_executable('pcs') is None:
        module.fail_json(msg="'pcs' executable not found. Install 'pcs'.")

    # get the pcs major.minor version
    rc, out, err = module.run_command('pcs --version')
    if rc == 0:
        pcs_version = out.split('.')[0] + '.' + out.split('.')[1]
    else:
        module.fail_json(msg="pcs --version exited with non-zero exit code (" + rc + "): " + out + err)

    # check if 'master' and 'promotable' classes have the needed keyword in options
    if resource_class == 'master' and not ('--master' in resource_options or 'master' in resource_options):
        module.fail_json(msg='When creating Master/Slave resource you must specify keyword "master" or "--master" in "options"')
    if resource_class == 'promotable' and 'promotable' not in resource_options:
        module.fail_json(msg='When creating promotable resource you must specify keyword "promotable" in "options"')

    module.params['cib_file_param'] = ''
    if cib_file is not None:
        # use cib_file if specified
        if os.path.isfile(cib_file):
            try:
                current_cib = ET.parse(cib_file)
            except Exception as e:
                module.fail_json(msg="Error encountered parsing the cib_file - %s" % (e))
            current_cib_root = current_cib.getroot()
            module.params['cib_file_param'] = '-f ' + cib_file
        else:
            module.fail_json(msg="%(cib_file)s is not a file or doesn't exists" % module.params)
    else:
        # get running cluster configuration
        rc, out, err = module.run_command('pcs cluster cib')
        if rc == 0:
            current_cib_root = ET.fromstring(out)
        else:
            module.fail_json(msg='Failed to load cluster configuration', out=out, error=err)

    # try to find the resource that we seek
    resource = None
    cib_resources = current_cib_root.find('./configuration/resources')
    resource = find_resource(cib_resources, resource_name)

    if state == 'present' and resource is None:
        # resource should be present, but we don't see it in the configuration - let's create it
        result['changed'] = True
        if not module.check_mode:
            if resource_class == 'stonith':
                cmd = 'pcs %(cib_file_param)s stonith create %(name)s %(resource_type)s %(options)s' % module.params
            elif resource_class == 'master' or resource_class == 'promotable':
                # we first create Master/Slave or Promotable resource with child_name and later rename it
                cmd = 'pcs %(cib_file_param)s resource create %(child_name)s %(resource_type)s %(options)s' % module.params
            else:
                cmd = 'pcs %(cib_file_param)s resource create %(name)s %(resource_type)s %(options)s' % module.params
            rc, out, err = module.run_command(cmd)
            if rc != 0 and "Call cib_replace failed (-62): Timer expired" in err:
                # EL6: special retry when we failed to create resource because of timer waiting on cib expired
                rc, out, err = module.run_command(cmd)
            if rc == 0:
                if resource_class == 'master' or resource_class == 'promotable':
                    # rename the resource to desirable name
                    rc, out, err = module.run_command('pcs cluster cib')
                    if rc == 0:
                        updated_cib_root = ET.fromstring(out)
                        multistate_resource = None
                        updated_cib_resources = updated_cib_root.find('./configuration/resources')
                        resource_suffix = '-master' if pcs_version == '0.9' else '-clone'
                        multistate_resource = find_resource(updated_cib_resources, child_name + resource_suffix)
                        if multistate_resource is not None:
                            rename_multistate_element(multistate_resource, resource_name, child_name, resource_suffix)
                            # prepare the updated CIB and push it into the running cluster via cib-push
                            new_cib = ET.ElementTree(updated_cib_root)
                            new_cib_fd, new_cib_path = tempfile.mkstemp()
                            module.add_cleanup_file(new_cib_path)
                            new_cib.write(new_cib_path)
                            push_scope = 'scope=resources' if module.params['force_resource_update'] else ''
                            push_cmd = 'pcs cluster cib-push ' + push_scope + ' ' + new_cib_path
                            rc, out, err = module.run_command(push_cmd)
                            if rc == 0:
                                module.exit_json(changed=True)
                            else:
                                # rollback the failed rename by deleting the multistate resource
                                cmd = 'pcs %(cib_file_param)s resource delete %(child_name)s' % module.params
                                rc2, out2, err2 = module.run_command(cmd)
                                if rc2 == 0:
                                    module.fail_json(msg="Failed to push updated configuration for multistate resource to cluster using command '" + push_cmd +
                                                     "'. Creation of multistate resource was rolled back. You can retry this task with " +
                                                     "'force_resource_update=true' to see if that helps.", output=out, error=err)
                                else:
                                    module.fail_json(msg="Failed to delete resource after unsuccessful multistate resource configuration update using command '"
                                                     + cmd + "'", output=out2, error=err2)
                        else:
                            module.fail_json(msg="Failed to detect multistate resource after creating it with cmd '" + cmd + "'!",
                                             output=out, error=err, previous_cib=current_cib)
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to create resource using command '" + cmd + "'", output=out, error=err)

    elif state == 'present' and resource is not None:
        # resource should be present and we found a resource with that ID - let's compare it with the definition to see if it needs a change

        # lets simulate how the resource would look like if it was created using command we have
        clean_cib_fd, clean_cib_path = tempfile.mkstemp()
        module.add_cleanup_file(clean_cib_path)
        # we must be sure that clean_cib_path is empty before pcs writes into it
        module.do_cleanup_files()
        if resource_class == 'stonith':
            cmd = 'pcs -f ' + clean_cib_path + ' stonith create %(name)s %(resource_type)s %(options)s' % module.params
        elif resource_class == 'master' or resource_class == 'promotable':
            # we first create Master/Slave or Promotable resource with child_name and later rename it
            cmd = 'pcs -f ' + clean_cib_path + ' resource create %(child_name)s %(resource_type)s %(options)s' % module.params
        else:
            cmd = 'pcs -f ' + clean_cib_path + ' resource create %(name)s %(resource_type)s %(options)s' % module.params
        rc, out, err = module.run_command(cmd)
        if rc == 0:
            if resource_class == 'master' or resource_class == 'promotable':
                # deal with multistate resources
                clean_cib = ET.parse(clean_cib_path)
                clean_cib_root = clean_cib.getroot()
                multistate_resource = None
                updated_cib_resources = clean_cib_root.find('./configuration/resources')
                resource_suffix = '-master' if pcs_version == '0.9' else '-clone'
                multistate_resource = find_resource(updated_cib_resources, child_name + resource_suffix)
                if multistate_resource is not None:
                    rename_multistate_element(multistate_resource, resource_name, child_name, resource_suffix)
                    # we try to write the changes into temporary cib_file
                    try:
                        clean_cib.write(clean_cib_path)
                    except Exception as e:
                        module.fail_json(msg="Error encountered writing intermediate multistate result to clean_cib_path - %s" % (e))
                else:
                    module.fail_json(msg="Failed to detect intermediate multistate resource after creating it with cmd '" + cmd + "'!",
                                     output=out, error=err, previous_cib=current_cib)

            # we have a comparable resource created in clean cluster, so lets select it and compare it
            clean_cib = ET.parse(clean_cib_path)
            clean_cib_root = clean_cib.getroot()
            clean_resource = None
            cib_clean_resources = clean_cib_root.find('./configuration/resources')
            clean_resource = find_resource(cib_clean_resources, resource_name)

            if clean_resource is not None:
                # cleanup the definition of resource and clean_resource before comparison
                remove_ignored_meta_attributes(resource, ignored_meta_attributes)
                remove_empty_meta_attributes_tag(resource)

                remove_ignored_meta_attributes(clean_resource, ignored_meta_attributes)
                remove_empty_meta_attributes_tag(clean_resource)

                # compare the existing resource in cluster and simulated clean_resource
                rc, diff = compare_resources(module, resource, clean_resource)
                if rc == 0:
                    # if no differences were found there is no need to update the resource
                    module.exit_json(changed=False)
                else:
                    # otherwise let's replace the resource with the new one
                    result['changed'] = True
                    result['diff'] = diff
                    if not module.check_mode:
                        replace_element(resource, clean_resource)
                        # when we use cib_file then we can dump the changed CIB directly into file
                        if cib_file is not None:
                            try:
                                current_cib.write(cib_file)
                            except Exception as e:
                                module.fail_json(msg="Error encountered writing result to cib_file - %s" % (e))
                            module.exit_json(changed=True)
                        # when not using cib_file then we continue preparing changes for cib-push into running cluster
                        new_cib = ET.ElementTree(current_cib_root)
                        new_cib_fd, new_cib_path = tempfile.mkstemp()
                        module.add_cleanup_file(new_cib_path)
                        new_cib.write(new_cib_path)
                        push_scope = 'scope=resources' if module.params['force_resource_update'] else ''
                        push_cmd = 'pcs cluster cib-push ' + push_scope + ' ' + new_cib_path
                        rc, out, err = module.run_command(push_cmd)
                        if rc == 0:
                            module.exit_json(changed=True)
                        else:
                            module.fail_json(msg="Failed to push updated configuration to cluster using command '" + push_cmd + "'", output=out, error=err)
            else:
                module.fail_json(msg="Unable to find simulated resource, This is most probably a bug.")
        else:
            module.fail_json(msg="Unable to simulate resource with given definition using command '" + cmd + "'", output=out, error=err)

    elif state == 'absent' and resource is not None:
        # resource should not be present but we found something - let's remove it
        result['changed'] = True
        if not module.check_mode:
            if resource_class == 'stonith':
                cmd = 'pcs %(cib_file_param)s stonith delete %(name)s' % module.params
            else:
                cmd = 'pcs %(cib_file_param)s resource delete %(name)s' % module.params
            rc, out, err = module.run_command(cmd)
            if rc == 0:
                module.exit_json(changed=True)
            else:
                module.fail_json(msg="Failed to delete resource using command '" + cmd + "'", output=out, error=err)

    else:
        # resource should not be present and is not there, nothing to do
        result['changed'] = False

    # END of module
    module.exit_json(**result)
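
A note on the CIB lookups above: find_resource() has to match a resource by its id attribute somewhere under <resources>, including inside clone/master wrappers for the multistate cases. A standalone sketch of that lookup with ElementTree, under the assumption that a recursive id match is all that is needed (the XML snippet is illustrative):

import xml.etree.ElementTree as ET

cib_xml = '''<cib>
  <configuration>
    <resources>
      <primitive id="vip" class="ocf" type="IPaddr2"/>
      <clone id="web-clone">
        <primitive id="web" class="systemd" type="httpd"/>
      </clone>
    </resources>
  </configuration>
</cib>'''

def find_resource(resources, resource_id):
    # iter() descends into clone/master wrappers as well
    for element in resources.iter():
        if element.attrib.get('id') == resource_id:
            return element
    return None

root = ET.fromstring(cib_xml)
resources = root.find('./configuration/resources')
match = find_resource(resources, 'web')
print(match.tag if match is not None else 'not found')  # -> primitive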