Exemplo n.º 1
0
def preflight_validation(bin_path,
                         project_path,
                         version,
                         variables_args=None,
                         plan_file=None):
    """Validate the Terraform binary, project directory and configuration.

    Fails the module (via the global ``module``) when the binary or project
    path is unusable, then runs ``terraform validate`` inside the project
    directory with ``check_rc=True`` so a failed validation aborts the run.
    """
    # NOTE(review): this check requires a '/' in project_path, which also
    # rejects bare relative directory names, while the message only mentions
    # None/'' -- confirm the restriction is intentional.
    if project_path is None or '/' not in project_path:
        module.fail_json(
            msg="Path for Terraform project can not be None or ''.")
    if not os.path.exists(bin_path):
        module.fail_json(
            msg=
            "Path for Terraform binary '{0}' doesn't exist on this host - check the path and try again please."
            .format(bin_path))
    if not os.path.isdir(project_path):
        module.fail_json(
            msg=
            "Path for Terraform project '{0}' doesn't exist on this host - check the path and try again please."
            .format(project_path))
    # Terraform < 0.15.0 accepts variable arguments on `validate`;
    # newer releases are invoked without them.
    if LooseVersion(version) < LooseVersion('0.15.0'):
        rc, out, err = module.run_command([bin_path, 'validate'] +
                                          variables_args,
                                          check_rc=True,
                                          cwd=project_path)
    else:
        rc, out, err = module.run_command([bin_path, 'validate'],
                                          check_rc=True,
                                          cwd=project_path)
Exemplo n.º 2
0
def install_flat(module, binary, remote, names, method, no_dependencies):
    """Add new flatpaks."""
    global result
    # Split the requested names into direct download URLs and plain ids.
    uri_names = [name for name in names
                 if name.startswith(('http://', 'https://'))]
    id_names = [name for name in names
                if not name.startswith(('http://', 'https://'))]
    base_command = [binary, "install", "--{0}".format(method)]
    # flatpak 1.1.3 renamed the non-interactive switch.
    if LooseVersion(_flatpak_version(module, binary)) < LooseVersion('1.1.3'):
        base_command.append("-y")
    else:
        base_command.append("--noninteractive")
    if no_dependencies:
        base_command.append("--no-deps")
    # URLs install directly; plain ids are resolved against the remote.
    if uri_names:
        _flatpak_command(module, module.check_mode, base_command + uri_names)
    if id_names:
        _flatpak_command(module, module.check_mode,
                         base_command + [remote] + id_names)
    result['changed'] = True
Exemplo n.º 3
0
def remove_plugin(module,
                  plugin_bin,
                  plugin_name,
                  allow_root,
                  kibana_version='4.6'):
    """Remove a Kibana plugin, using the CLI matching the Kibana version.

    Returns (changed, command string, stdout, stderr); fails the module when
    the underlying command exits non-zero.
    """
    # Kibana > 4.6 ships a dedicated `kibana-plugin` executable next to the
    # legacy binary; older releases use the `plugin` subcommand instead.
    if LooseVersion(kibana_version) > LooseVersion('4.6'):
        plugin_tool = os.path.join(os.path.dirname(plugin_bin),
                                   'kibana-plugin')
        cmd_args = [plugin_tool, "remove", plugin_name]
    else:
        cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"],
                    plugin_name]

    if allow_root:
        cmd_args.append('--allow-root')

    # In check mode just report what would have been executed.
    if module.check_mode:
        return True, " ".join(cmd_args), "check mode", ""

    rc, out, err = module.run_command(cmd_args)
    if rc != 0:
        module.fail_json(msg=parse_error(out))

    return True, " ".join(cmd_args), out, err
Exemplo n.º 4
0
def main():
    """Ansible module entry point: check that the current Python environment
    satisfies a list of pip-style version requirements and report which
    packages are missing, mismatched or valid."""
    module = AnsibleModule(
        argument_spec=dict(
            dependencies=dict(type='list', elements='str', default=[])
        ),
        supports_check_mode=True,
    )
    # distutils/pkg_resources are required to introspect installed packages.
    if not HAS_DISTUTILS:
        module.fail_json(
            msg='Could not import "distutils" and "pkg_resources" libraries to introspect python environment.',
            python=sys.executable,
            python_version=sys.version,
            python_version_info=python_version_info,
            python_system_path=sys.path,
        )
    # Matches e.g. "ansible", "ansible==2.9" or "requests>=2.0" into
    # (package, operator, version); operator and version may be absent.
    pkg_dep_re = re.compile(r'(^[a-zA-Z][a-zA-Z0-9_-]+)(?:(==|[><]=?)([0-9.]+))?$')

    results = dict(
        not_found=[],
        mismatched={},
        valid={},
    )

    for dep in module.params['dependencies']:
        match = pkg_dep_re.match(dep)
        if not match:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Must be formatted like 'ansible>2.6'".format(dep))
        pkg, op, version = match.groups()
        # `operations` maps operator strings to comparison callables
        # (defined elsewhere in this module).
        if op is not None and op not in operations:
            module.fail_json(msg="Failed to parse version requirement '{0}'. Operator must be one of >, <, <=, >=, or ==".format(dep))
        try:
            existing = pkg_resources.get_distribution(pkg).version
        except pkg_resources.DistributionNotFound:
            # not there
            results['not_found'].append(pkg)
            continue
        if op is None and version is None:
            # No version constraint: any installed version is acceptable.
            results['valid'][pkg] = {
                'installed': existing,
                'desired': None,
            }
        elif operations[op](LooseVersion(existing), LooseVersion(version)):
            results['valid'][pkg] = {
                'installed': existing,
                'desired': dep,
            }
        else:
            results['mismatched'][pkg] = {
                'installed': existing,
                'desired': dep,
            }

    module.exit_json(
        python=sys.executable,
        python_version=sys.version,
        python_version_info=python_version_info,
        python_system_path=sys.path,
        **results
    )
Exemplo n.º 5
0
 def from_package(psutil):
     """Select the PSAdapter implementation matching the installed psutil."""
     ver = LooseVersion(psutil.__version__)
     if ver >= LooseVersion('5.3.0'):
         return PSAdapter530(psutil)
     if ver >= LooseVersion('2.0.0'):
         return PSAdapter200(psutil)
     return PSAdapter100(psutil)
Exemplo n.º 6
0
def gitlab_authentication(module):
    """Build and return an authenticated python-gitlab ``Gitlab`` instance.

    Reads connection and credential parameters from the module and supports
    private-token, OAuth-token, job-token and (on old library versions)
    username/password authentication.  Calls ``module.fail_json()`` on any
    connection or authentication failure.
    """
    gitlab_url = module.params['api_url']
    validate_certs = module.params['validate_certs']
    gitlab_user = module.params['api_username']
    gitlab_password = module.params['api_password']
    gitlab_token = module.params['api_token']
    gitlab_oauth_token = module.params['api_oauth_token']
    gitlab_job_token = module.params['api_job_token']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"),
                         exception=GITLAB_IMP_ERR)

    try:
        # The python-gitlab library removed username/password authentication
        # in 1.13.0:
        # Changelog : https://github.com/python-gitlab/python-gitlab/releases/tag/v1.13.0
        # This condition keeps older versions of the library working.
        if LooseVersion(gitlab.__version__) < LooseVersion("1.13.0"):
            gitlab_instance = gitlab.Gitlab(url=gitlab_url,
                                            ssl_verify=validate_certs,
                                            email=gitlab_user,
                                            password=gitlab_password,
                                            private_token=gitlab_token,
                                            api_version=4)
        else:
            # We can create an oauth_token using a username and password
            # https://docs.gitlab.com/ee/api/oauth2.html#authorization-code-flow
            if gitlab_user:
                data = {
                    'grant_type': 'password',
                    'username': gitlab_user,
                    'password': gitlab_password
                }
                # NOTE(review): when api_username is set, any explicitly
                # supplied api_oauth_token is overwritten here -- confirm
                # that precedence is intended.
                resp = requests.post(urljoin(gitlab_url, "oauth/token"),
                                     data=data,
                                     verify=validate_certs)
                resp_data = resp.json()
                gitlab_oauth_token = resp_data["access_token"]

            gitlab_instance = gitlab.Gitlab(url=gitlab_url,
                                            ssl_verify=validate_certs,
                                            private_token=gitlab_token,
                                            oauth_token=gitlab_oauth_token,
                                            job_token=gitlab_job_token,
                                            api_version=4)

        # Verify the credentials by making an authenticated call.
        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError,
            gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s" %
                         to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to GitLab server: %s. \
            GitLab remove Session API now that private tokens are removed from user API endpoints since version 10.2."
                         % to_native(e))

    return gitlab_instance
Exemplo n.º 7
0
def present(dest, username, password, crypt_scheme, create, check_mode):
    """ Ensures user is present

    Returns (msg, changed) """
    # Apache-native schemes can use the preconfigured context; any other
    # scheme gets a CryptContext that also understands the apache schemes.
    if crypt_scheme in apache_hashes:
        context = htpasswd_context
    else:
        context = CryptContext(schemes=[crypt_scheme] + apache_hashes)
    if not os.path.exists(dest):
        if not create:
            raise ValueError('Destination %s does not exist' % dest)
        if check_mode:
            return ("Create %s" % dest, True)
        create_missing_directories(dest)
        # passlib 1.6 renamed the HtpasswdFile constructor arguments
        # (autoload/default -> new/default_scheme).
        if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
            ht = HtpasswdFile(dest,
                              new=True,
                              default_scheme=crypt_scheme,
                              context=context)
        else:
            ht = HtpasswdFile(dest,
                              autoload=False,
                              default=crypt_scheme,
                              context=context)
        # set_password() replaced update() in newer passlib releases.
        if getattr(ht, 'set_password', None):
            ht.set_password(username, password)
        else:
            ht.update(username, password)
        ht.save()
        return ("Created %s and added %s" % (dest, username), True)
    else:
        if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
            ht = HtpasswdFile(dest,
                              new=False,
                              default_scheme=crypt_scheme,
                              context=context)
        else:
            ht = HtpasswdFile(dest, default=crypt_scheme, context=context)

        # check_password() replaced verify() in newer passlib releases.
        found = None
        if getattr(ht, 'check_password', None):
            found = ht.check_password(username, password)
        else:
            found = ht.verify(username, password)

        if found:
            return ("%s already present" % username, False)
        else:
            if not check_mode:
                if getattr(ht, 'set_password', None):
                    ht.set_password(username, password)
                else:
                    ht.update(username, password)
                ht.save()
            return ("Add/update %s" % username, True)
Exemplo n.º 8
0
def check_requests_dep(module):
    """Check if an adequate requests version is available"""
    if not HAS_REQUESTS:
        module.fail_json(msg=missing_required_lib('requests'),
                         exception=REQUESTS_IMP_ERR)
    else:
        # Python 3 needs requests >= 2.0.0; Python 2 works with >= 1.0.0.
        minimum = '2.0.0' if PY3 else '1.0.0'
        if LooseVersion(requests.__version__) < LooseVersion(minimum):
            module.fail_json(
                msg="'requests' library version should be >= %s, found: %s."
                % (minimum, requests.__version__))
Exemplo n.º 9
0
    def check_mas_tool(self):
        ''' Verifies that the `mas` tool is available in a recent version '''

        # The tool must be present at all.
        if not self.mas_path:
            self.module.fail_json(msg='Required `mas` tool is not installed')

        # Ask the tool for its version and reject on failure, empty output
        # or anything older than 1.5.0.
        rc, out, err = self.run(['version'])
        too_old = (rc != 0 or not out.strip()
                   or LooseVersion(out.strip()) < LooseVersion('1.5.0'))
        if too_old:
            self.module.fail_json(
                msg='`mas` tool in version 1.5.0+ needed, got ' + out.strip())
Exemplo n.º 10
0
 def __init__(self, module):
     """Probe mkfs.f2fs to decide whether the force flag (-f) is usable."""
     super(F2fs, self).__init__(module)
     mkfs = self.module.get_bin_path(self.MKFS, required=True)
     # Run mkfs.f2fs against /dev/null just to capture its version banner.
     dummy, out, dummy = self.module.run_command(
         [mkfs, os.devnull], check_rc=False, environ_update=self.LANG_ENV)
     # Looking for "	F2FS-tools: mkfs.f2fs Ver: 1.10.0 (2018-01-30)"
     # mkfs.f2fs displays version since v1.2.0
     match = re.search(r"F2FS-tools: mkfs.f2fs Ver: ([0-9.]+) \(", out)
     if match is not None:
         # Since 1.9.0, mkfs.f2fs check overwrite before make filesystem
         # before that version -f switch wasn't used
         if LooseVersion(match.group(1)) >= LooseVersion('1.9.0'):
             self.MKFS_FORCE_FLAGS = ['-f']
Exemplo n.º 11
0
    def __init__(self, module):
        """
        Construct module
        """
        self.module = module
        # Fail early when either required third-party SDK failed to import.
        if not CLC_FOUND:
            self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
        if not REQUESTS_FOUND:
            self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
        # A minimum requests version is also enforced.
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library  version should be >= 2.5.0')

        # Tag outgoing API calls with this module's user agent.
        self._set_user_agent(self.clc)
Exemplo n.º 12
0
def uninstall_flat(module, binary, names, method):
    """Remove existing flatpaks."""
    global result
    # Resolve each requested name to the exact installed flatpak name.
    installed = [
        _match_installed_flat_name(module, binary, name, method)
        for name in names
    ]
    command = [binary, "uninstall"]
    # flatpak 1.1.3 renamed the non-interactive switch.
    if LooseVersion(_flatpak_version(module, binary)) < LooseVersion('1.1.3'):
        command.append("-y")
    else:
        command.append("--noninteractive")
    command += ["--{0}".format(method)] + installed
    _flatpak_command(module, module.check_mode, command)
    result['changed'] = True
Exemplo n.º 13
0
def merge_hash_wrapper(x, y, recursive=False, list_merge='replace'):
    '''Wrapper of the function merge_hash from ansible.utils.vars.

    Ansible 2.9 and lower only accepts the two positional parameters, so the
    extra options are rejected there.'''

    if LooseVersion(ansible_version) >= LooseVersion('2.10'):
        return merge_hash(x, y, recursive, list_merge)

    # Older Ansible: only the default option values are supported.
    if recursive or list_merge != 'replace':
        msg = (
            "Non default options of list_merge(default=replace) or recursive(default=False) "
            "are not allowed in Ansible version 2.9 or lower. Ansible version is %s, "
            "recursive=%s, and list_merge=%s.")
        raise AnsibleFilterError(msg %
                                 (ansible_version, recursive, list_merge))
    return merge_hash(x, y)
Exemplo n.º 14
0
def absent(dest, username, check_mode):
    """ Ensures user is absent

    Returns (msg, changed) """
    # passlib 1.6 changed the HtpasswdFile constructor arguments.
    if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
        ht = HtpasswdFile(dest, new=False)
    else:
        ht = HtpasswdFile(dest)

    if username not in ht.users():
        return ("%s not present" % username, False)
    if not check_mode:
        ht.delete(username)
        ht.save()
    return ("Remove %s" % username, True)
Exemplo n.º 15
0
def addmodify_repo(module, repodata, old_repos, zypper_version, warnings):
    "Adds the repo, removes old repos before, that would conflict."
    # repodata: dict with url/name/priority/enabled/gpgcheck/autorefresh/alias.
    # Returns the (rc, stdout, stderr) triple of the `zypper addrepo` call.
    repo = repodata['url']
    cmd = _get_cmd(module, 'addrepo', '--check')
    if repodata['name']:
        cmd.extend(['--name', repodata['name']])

    # priority on addrepo available since 1.12.25
    # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L327-L336
    if repodata['priority']:
        if zypper_version >= LooseVersion('1.12.25'):
            cmd.extend(['--priority', str(repodata['priority'])])
        else:
            warnings.append(
                "Setting priority only available for zypper >= 1.12.25. Ignoring priority argument."
            )

    if repodata['enabled'] == '0':
        cmd.append('--disable')

    # gpgcheck available since 1.6.2
    # https://github.com/openSUSE/zypper/blob/b9b3cb6db76c47dc4c47e26f6a4d2d4a0d12b06d/package/zypper.changes#L2446-L2449
    # the default changed in the past, so don't assume a default here and show warning for old zypper versions
    if zypper_version >= LooseVersion('1.6.2'):
        if repodata['gpgcheck'] == '1':
            cmd.append('--gpgcheck')
        else:
            cmd.append('--no-gpgcheck')
    else:
        warnings.append(
            "Enabling/disabling gpgcheck only available for zypper >= 1.6.2. Using zypper default value."
        )

    if repodata['autorefresh'] == '1':
        cmd.append('--refresh')

    cmd.append(repo)

    # A .repo file carries its own alias; otherwise pass it explicitly.
    if not repo.endswith('.repo'):
        cmd.append(repodata['alias'])

    # Drop any previously existing repos that would conflict with this one.
    if old_repos is not None:
        for oldrepo in old_repos:
            remove_repo(module, oldrepo['url'])

    rc, stdout, stderr = module.run_command(cmd, check_rc=False)
    return rc, stdout, stderr
Exemplo n.º 16
0
 def __init__(self, module):
     """Probe mkfs.btrfs to decide whether the force flag (-f) is usable."""
     super(Btrfs, self).__init__(module)
     mkfs = self.module.get_bin_path(self.MKFS, required=True)
     dummy, stdout, stderr = self.module.run_command([mkfs, '--version'],
                                                     check_rc=True)
     # The version banner normally appears on stdout ...
     match = re.search(r" v([0-9.]+)", stdout)
     if not match:
         # v0.20-rc1 use stderr
         match = re.search(r" v([0-9.]+)", stderr)
     if match:
         # v0.20-rc1 doesn't have --force parameter added in following version v3.12
         if LooseVersion(match.group(1)) >= LooseVersion('3.12'):
             self.MKFS_FORCE_FLAGS = ['-f']
     else:
         # assume version is greater or equal to 3.12
         self.MKFS_FORCE_FLAGS = ['-f']
         self.module.warn('Unable to identify mkfs.btrfs version (%r, %r)' %
                          (stdout, stderr))
Exemplo n.º 17
0
def install_plugin(module,
                   plugin_bin,
                   plugin_name,
                   url,
                   timeout,
                   allow_root,
                   kibana_version='4.6'):
    """Install a Kibana plugin, using the CLI matching the Kibana version.

    Returns (changed, command string, stdout, stderr); fails the module when
    the underlying command exits non-zero.
    """
    # Kibana > 4.6 ships a dedicated `kibana-plugin` executable next to the
    # legacy binary; older releases use the `plugin` subcommand instead.
    if LooseVersion(kibana_version) > LooseVersion('4.6'):
        plugin_tool = os.path.join(os.path.dirname(plugin_bin),
                                   'kibana-plugin')
        # The new CLI takes either a URL or a plugin name as the argument.
        cmd_args = [plugin_tool, "install", url if url else plugin_name]
    else:
        cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"],
                    plugin_name]
        # The legacy CLI takes the URL as a separate option.
        if url:
            cmd_args.extend(["--url", url])

    if timeout:
        cmd_args.extend(["--timeout", timeout])

    if allow_root:
        cmd_args.append('--allow-root')

    # In check mode just report what would have been executed.
    if module.check_mode:
        return True, " ".join(cmd_args), "check mode", ""

    rc, out, err = module.run_command(cmd_args)
    if rc != 0:
        module.fail_json(msg=parse_error(out))

    return True, " ".join(cmd_args), out, err
Exemplo n.º 18
0
    def connect_to_influxdb(self):
        """Build an InfluxDBClient from the module's connection parameters."""
        params = self.params
        kwargs = dict(
            host=self.hostname,
            port=self.port,
            username=self.username,
            password=self.password,
            database=self.database_name,
            ssl=params['ssl'],
            verify_ssl=params['validate_certs'],
            timeout=params['timeout'],
            use_udp=params['use_udp'],
            udp_port=params['udp_port'],
            proxies=params['proxies'],
        )
        client_version = LooseVersion(influxdb_version)
        # retries option is added in version 4.1.0
        if client_version >= LooseVersion('4.1.0'):
            kwargs['retries'] = params['retries']
        # path argument is added in version 5.1.0
        if client_version >= LooseVersion('5.1.0'):
            kwargs['path'] = self.path
        return InfluxDBClient(**kwargs)
Exemplo n.º 19
0
 def _brew_cask_command_is_deprecated(self):
     """Return True when Homebrew is new enough that `brew cask` is deprecated."""
     # The `brew cask` replacements were fully available in 2.6.0
     # (https://brew.sh/2020/12/01/homebrew-2.6.0/)
     current = LooseVersion(self._get_brew_version())
     return current >= LooseVersion('2.6.0')
Exemplo n.º 20
0
def main():
    """Ansible module entry point: ensure a GitLab branch exists or is absent."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(auth_argument_spec())
    argument_spec.update(
        project=dict(type='str', required=True),
        branch=dict(type='str', required=True),
        ref_branch=dict(type='str', required=False),
        state=dict(type='str',
                   default="present",
                   choices=["absent", "present"]),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['api_username', 'api_token'],
                               ['api_username', 'api_oauth_token'],
                               ['api_username', 'api_job_token'],
                               ['api_token', 'api_oauth_token'],
                               ['api_token', 'api_job_token'],
                           ],
                           required_together=[
                               ['api_username', 'api_password'],
                           ],
                           required_one_of=[[
                               'api_username', 'api_token', 'api_oauth_token',
                               'api_job_token'
                           ]],
                           required_if=[
                               ['state', 'present', ['ref_branch'], True],
                           ],
                           supports_check_mode=False)

    project = module.params['project']
    branch = module.params['branch']
    ref_branch = module.params['ref_branch']
    state = module.params['state']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"),
                         exception=GITLAB_IMP_ERR)

    # The branch API used below needs python-gitlab >= 2.3.0.
    gitlab_version = gitlab.__version__
    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
        # Fixed copy/paste defect: the message previously named
        # "gitlab_proteched_branch" although this is the branch module.
        module.fail_json(
            msg=
            "community.general.gitlab_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
            " Please upgrade python-gitlab to version 2.3.0 or above." %
            gitlab_version)

    gitlab_instance = gitlab_authentication(module)
    this_gitlab = GitlabBranch(module=module,
                               project=project,
                               gitlab_instance=gitlab_instance)

    this_branch = this_gitlab.get_branch(branch)

    if not this_branch and state == "present":
        # The new branch is created from ref_branch, which must exist.
        r_branch = this_gitlab.get_branch(ref_branch)
        if not r_branch:
            module.fail_json(msg="Ref branch {b} does not exist.".format(
                b=ref_branch))
        this_gitlab.create_branch(branch, ref_branch)
        module.exit_json(changed=True,
                         msg="Created the branch {b}.".format(b=branch))
    elif this_branch and state == "present":
        module.exit_json(changed=False,
                         msg="Branch {b} already exists.".format(b=branch))
    elif this_branch and state == "absent":
        try:
            this_gitlab.delete_branch(this_branch)
            module.exit_json(changed=True,
                             msg="Branch {b} deleted.".format(b=branch))
        except Exception:
            # Surface the full traceback; the bare exception text is rarely
            # actionable for API failures.
            module.fail_json(msg="Failed to delete branch {b}.".format(b=branch),
                             exception=traceback.format_exc())
    else:
        module.exit_json(changed=False, msg="No changes are needed.")
Exemplo n.º 21
0
def main():
    """Ansible module entry point: manage a GitLab protected-branch rule."""
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(auth_argument_spec())
    argument_spec.update(
        project=dict(type='str', required=True),
        name=dict(type='str', required=True),
        merge_access_levels=dict(type='str',
                                 default="maintainer",
                                 choices=["maintainer", "developer",
                                          "nobody"]),
        push_access_level=dict(type='str',
                               default="maintainer",
                               choices=["maintainer", "developer", "nobody"]),
        state=dict(type='str',
                   default="present",
                   choices=["absent", "present"]),
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['api_username', 'api_token'],
                               ['api_username', 'api_oauth_token'],
                               ['api_username', 'api_job_token'],
                               ['api_token', 'api_oauth_token'],
                               ['api_token', 'api_job_token'],
                           ],
                           required_together=[
                               ['api_username', 'api_password'],
                           ],
                           required_one_of=[[
                               'api_username', 'api_token', 'api_oauth_token',
                               'api_job_token'
                           ]],
                           supports_check_mode=True)

    project = module.params['project']
    name = module.params['name']
    merge_access_levels = module.params['merge_access_levels']
    push_access_level = module.params['push_access_level']
    state = module.params['state']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"),
                         exception=GITLAB_IMP_ERR)

    # The protected-branch API used below needs python-gitlab >= 2.3.0.
    gitlab_version = gitlab.__version__
    if LooseVersion(gitlab_version) < LooseVersion('2.3.0'):
        # Fixed typo: "proteched" -> "protected" in the user-facing message.
        module.fail_json(
            msg=
            "community.general.gitlab_protected_branch requires python-gitlab Python module >= 2.3.0 (installed version: [%s])."
            " Please upgrade python-gitlab to version 2.3.0 or above." %
            gitlab_version)

    gitlab_instance = gitlab_authentication(module)
    this_gitlab = GitlabProtectedBranch(module=module,
                                        project=project,
                                        gitlab_instance=gitlab_instance)

    p_branch = this_gitlab.protected_branch_exist(name=name)
    if not p_branch and state == "present":
        this_gitlab.create_protected_branch(
            name=name,
            merge_access_levels=merge_access_levels,
            push_access_level=push_access_level)
        module.exit_json(changed=True, msg="Created the protected branch.")
    elif p_branch and state == "present":
        # The API cannot update a rule in place: recreate it when it differs.
        if not this_gitlab.compare_protected_branch(name, merge_access_levels,
                                                    push_access_level):
            this_gitlab.delete_protected_branch(name=name)
            this_gitlab.create_protected_branch(
                name=name,
                merge_access_levels=merge_access_levels,
                push_access_level=push_access_level)
            module.exit_json(changed=True,
                             msg="Recreated the protected branch.")
    elif p_branch and state == "absent":
        this_gitlab.delete_protected_branch(name=name)
        module.exit_json(changed=True, msg="Deleted the protected branch.")
    module.exit_json(changed=False, msg="No changes are needed.")
Exemplo n.º 22
0
def proxmox_version(proxmox):
    """Return the Proxmox VE version reported by the API as a LooseVersion."""
    version_info = proxmox.version.get()
    return LooseVersion(version_info['version'])
Exemplo n.º 23
0
from ansible.module_utils.common._collections_compat import MutableMapping

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.utils.display import Display
from ansible.template import Templar

from ansible_collections.community.general.plugins.module_utils.version import LooseVersion

# 3rd party imports
try:
    import requests
    if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
        raise ImportError
    HAS_REQUESTS = True
except ImportError:
    HAS_REQUESTS = False

display = Display()


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    ''' Host inventory parser for ansible using Proxmox as source. '''

    NAME = 'community.general.proxmox'

    def __init__(self):
Exemplo n.º 24
0
def main():
    """Ansible module entry point: drive Terraform for the requested state.

    Initializes the project (optionally), selects the requested workspace,
    builds or consumes a plan, runs ``apply``/``destroy`` depending on
    ``state``, collects ``terraform output`` as JSON and exits via
    ``module.exit_json`` / ``module.fail_json``.
    """
    global module
    module = AnsibleModule(
        argument_spec=dict(
            project_path=dict(required=True, type='path'),
            binary_path=dict(type='path'),
            plugin_paths=dict(type='list', elements='path'),
            workspace=dict(type='str', default='default'),
            purge_workspace=dict(type='bool', default=False),
            state=dict(default='present',
                       choices=['present', 'absent', 'planned']),
            variables=dict(type='dict'),
            variables_files=dict(aliases=['variables_file'],
                                 type='list',
                                 elements='path'),
            plan_file=dict(type='path'),
            state_file=dict(type='path'),
            targets=dict(type='list', elements='str', default=[]),
            lock=dict(type='bool', default=True),
            lock_timeout=dict(type='int', ),
            force_init=dict(type='bool', default=False),
            backend_config=dict(type='dict'),
            backend_config_files=dict(type='list', elements='path'),
            init_reconfigure=dict(type='bool', default=False),
            overwrite_init=dict(type='bool', default=True),
            check_destroy=dict(type='bool', default=False),
            parallelism=dict(type='int'),
            provider_upgrade=dict(type='bool', default=False),
        ),
        required_if=[('state', 'planned', ['plan_file'])],
        supports_check_mode=True,
    )

    project_path = module.params.get('project_path')
    bin_path = module.params.get('binary_path')
    plugin_paths = module.params.get('plugin_paths')
    workspace = module.params.get('workspace')
    purge_workspace = module.params.get('purge_workspace')
    state = module.params.get('state')
    variables = module.params.get('variables') or {}
    variables_files = module.params.get('variables_files')
    plan_file = module.params.get('plan_file')
    state_file = module.params.get('state_file')
    force_init = module.params.get('force_init')
    backend_config = module.params.get('backend_config')
    backend_config_files = module.params.get('backend_config_files')
    init_reconfigure = module.params.get('init_reconfigure')
    overwrite_init = module.params.get('overwrite_init')
    check_destroy = module.params.get('check_destroy')
    provider_upgrade = module.params.get('provider_upgrade')

    # Use the explicit binary if given, otherwise look up 'terraform' on PATH.
    if bin_path is not None:
        command = [bin_path]
    else:
        command = [module.get_bin_path('terraform', required=True)]

    checked_version = get_version(command[0])

    # Terraform 0.15 removed 'destroy -force' and the '=true' suffix on
    # '-auto-approve', so pick the argument set that matches the binary.
    if LooseVersion(checked_version) < LooseVersion('0.15.0'):
        DESTROY_ARGS = ('destroy', '-no-color', '-force')
        APPLY_ARGS = ('apply', '-no-color', '-input=false',
                      '-auto-approve=true')
    else:
        DESTROY_ARGS = ('destroy', '-no-color', '-auto-approve')
        APPLY_ARGS = ('apply', '-no-color', '-input=false', '-auto-approve')

    if force_init:
        # Only (re)run 'terraform init' when overwriting is allowed or when
        # the project has never been initialized locally.
        if overwrite_init or not os.path.isfile(
                os.path.join(project_path, ".terraform", "terraform.tfstate")):
            init_plugins(command[0], project_path, backend_config,
                         backend_config_files, init_reconfigure,
                         provider_upgrade, plugin_paths)

    # Switch to the requested workspace, creating it if needed; the previous
    # workspace is restored before exiting.
    workspace_ctx = get_workspace_context(command[0], project_path)
    if workspace_ctx["current"] != workspace:
        if workspace not in workspace_ctx["all"]:
            create_workspace(command[0], project_path, workspace)
        else:
            select_workspace(command[0], project_path, workspace)

    if state == 'present':
        command.extend(APPLY_ARGS)
    elif state == 'absent':
        command.extend(DESTROY_ARGS)

    if state == 'present' and module.params.get('parallelism') is not None:
        command.append('-parallelism=%d' % module.params.get('parallelism'))

    # Translate the 'variables' dict and 'variables_files' list into
    # repeated -var / -var-file command-line options.
    variables_args = []
    for k, v in variables.items():
        variables_args.extend(['-var', '{0}={1}'.format(k, v)])
    if variables_files:
        for f in variables_files:
            variables_args.extend(['-var-file', f])

    preflight_validation(command[0], project_path, checked_version,
                         variables_args)

    if module.params.get('lock') is not None:
        if module.params.get('lock'):
            command.append('-lock=true')
        else:
            command.append('-lock=false')
    if module.params.get('lock_timeout') is not None:
        command.append('-lock-timeout=%ds' % module.params.get('lock_timeout'))

    for t in (module.params.get('targets') or []):
        command.extend(['-target', t])

    # we aren't sure if this plan will result in changes, so assume yes
    needs_application, changed = True, False

    out, err = '', ''

    if state == 'absent':
        command.extend(variables_args)
    elif state == 'present' and plan_file:
        # Accept the plan file either relative to the project directory or
        # as the literal (possibly absolute) path that was given.
        if os.path.isfile(os.path.join(project_path, plan_file)) \
                or os.path.isfile(plan_file):
            command.append(plan_file)
        else:
            module.fail_json(
                msg=
                'Could not find plan_file "{0}", check the path and try again.'
                .format(plan_file))
    else:
        plan_file, needs_application, out, err, command = build_plan(
            command, project_path, variables_args, state_file,
            module.params.get('targets'), state, APPLY_ARGS, plan_file)
        if state == 'present' and check_destroy and '- destroy' in out:
            module.fail_json(
                msg="Aborting command because it would destroy some resources. "
                "Consider switching the 'check_destroy' to false to suppress this error"
            )
        command.append(plan_file)

    if needs_application and not module.check_mode and state != 'planned':
        rc, out, err = module.run_command(command,
                                          check_rc=False,
                                          cwd=project_path)
        if rc != 0:
            # Restore the original workspace before reporting the failure.
            if workspace_ctx["current"] != workspace:
                select_workspace(command[0], project_path,
                                 workspace_ctx["current"])
            module.fail_json(msg=err.rstrip(),
                             rc=rc,
                             stdout=out,
                             stdout_lines=out.splitlines(),
                             stderr=err,
                             stderr_lines=err.splitlines(),
                             cmd=' '.join(command))
        # Decide from Terraform's summary line whether anything was modified.
        # The parentheses make the previously implicit 'and'-before-'or'
        # precedence explicit: a non-destroy run is 'changed' when resources
        # were added or changed, and any run is 'changed' when resources
        # were destroyed.
        if ((' 0 added, 0 changed' not in out and state != 'absent')
                or ' 0 destroyed' not in out):
            changed = True

    # Fetch declared outputs as JSON; rc == 1 usually just means the project
    # declares no outputs, which is only worth a warning.
    outputs_command = [command[0], 'output', '-no-color', '-json'
                       ] + _state_args(state_file)
    rc, outputs_text, outputs_err = module.run_command(outputs_command,
                                                       cwd=project_path)
    if rc == 1:
        module.warn(
            "Could not get Terraform outputs. This usually means none have been defined.\nstdout: {0}\nstderr: {1}"
            .format(outputs_text, outputs_err))
        outputs = {}
    elif rc != 0:
        module.fail_json(msg="Failure when getting Terraform outputs. "
                         "Exited {0}.\nstdout: {1}\nstderr: {2}".format(
                             rc, outputs_text, outputs_err),
                         command=' '.join(outputs_command))
    else:
        outputs = json.loads(outputs_text)

    # Restore the Terraform workspace found when running the module
    if workspace_ctx["current"] != workspace:
        select_workspace(command[0], project_path, workspace_ctx["current"])
    if state == 'absent' and workspace != 'default' and purge_workspace is True:
        remove_workspace(command[0], project_path, workspace)

    module.exit_json(changed=changed,
                     state=state,
                     workspace=workspace,
                     outputs=outputs,
                     stdout=out,
                     stderr=err,
                     command=' '.join(command))
Exemplo n.º 25
0
    def create_instance(self, vmid, node, disk, storage, cpus, memory, swap,
                        timeout, clone, **kwargs):
        """Create (or clone) a container/VM on the given Proxmox node.

        Submits the creation/clone task through the Proxmox API, then polls
        once per second until the task finishes.  Returns True on success;
        fails the module via fail_json when *timeout* seconds elapse.

        NOTE(review): relies on the module-level VZ_TYPE global and on
        self.proxmox_api / self.module being initialized by the caller.
        """
        proxmox_node = self.proxmox_api.nodes(node)

        # Remove all empty kwarg entries
        kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)

        if VZ_TYPE == 'lxc':
            kwargs['cpulimit'] = cpus
            kwargs['rootfs'] = disk
            # 'netif' and 'mounts' arrive as dicts of per-device options and
            # must be flattened into top-level API parameters.
            if 'netif' in kwargs:
                kwargs.update(kwargs['netif'])
                del kwargs['netif']
            if 'mounts' in kwargs:
                kwargs.update(kwargs['mounts'])
                del kwargs['mounts']
            if 'pubkey' in kwargs:
                # Proxmox >= 4.2 accepts the key under 'ssh-public-keys';
                # on older versions the pubkey parameter is silently dropped.
                if self.version() >= LooseVersion('4.2'):
                    kwargs['ssh-public-keys'] = kwargs['pubkey']
                del kwargs['pubkey']
        else:
            # Non-LXC (qemu/openvz) uses different parameter names.
            kwargs['cpus'] = cpus
            kwargs['disk'] = disk

        if clone is not None:
            if VZ_TYPE != 'lxc':
                self.module.fail_json(
                    changed=False,
                    msg=
                    "Clone operator is only supported for LXC enabled proxmox clusters."
                )

            clone_is_template = self.is_template_container(node, clone)

            # By default, create a full copy only when the cloned container is not a template.
            create_full_copy = not clone_is_template

            # Only accept parameters that are compatible with the clone endpoint.
            valid_clone_parameters = ['hostname', 'pool', 'description']
            if self.module.params['storage'] is not None and clone_is_template:
                # Cloning a template, so create a full copy instead of a linked copy
                create_full_copy = True
            elif self.module.params[
                    'storage'] is None and not clone_is_template:
                # Not cloning a template, but also no defined storage. This isn't possible.
                self.module.fail_json(
                    changed=False,
                    msg=
                    "Cloned container is not a template, storage needs to be specified."
                )

            if self.module.params['clone_type'] == 'linked':
                if not clone_is_template:
                    self.module.fail_json(
                        changed=False,
                        msg=
                        "'linked' clone type is specified, but cloned container is not a template container."
                    )
                # Don't need to do more, by default create_full_copy is set to false already
            elif self.module.params['clone_type'] == 'opportunistic':
                if not clone_is_template:
                    # Cloned container is not a template, so we need our 'storage' parameter
                    valid_clone_parameters.append('storage')
            elif self.module.params['clone_type'] == 'full':
                create_full_copy = True
                valid_clone_parameters.append('storage')

            clone_parameters = {}

            # The clone endpoint expects '1'/'0' strings for the 'full' flag.
            if create_full_copy:
                clone_parameters['full'] = '1'
            else:
                clone_parameters['full'] = '0'
            for param in valid_clone_parameters:
                if self.module.params[param] is not None:
                    clone_parameters[param] = self.module.params[param]

            taskid = getattr(proxmox_node,
                             VZ_TYPE)(clone).clone.post(newid=vmid,
                                                        **clone_parameters)
        else:
            taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid,
                                                           storage=storage,
                                                           memory=memory,
                                                           swap=swap,
                                                           **kwargs)

        # Poll the task status once per second until it stops cleanly or the
        # timeout budget is exhausted.
        while timeout:
            if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
                    and proxmox_node.tasks(taskid).status.get()['exitstatus']
                    == 'OK'):
                return True
            timeout -= 1
            if timeout == 0:
                self.module.fail_json(
                    msg=
                    'Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
                    % proxmox_node.tasks(taskid).log.get()[:1])

            time.sleep(1)
        return False
Exemplo n.º 26
0
def cloud_block_storage(module, state, name, description, meta, size,
                        snapshot_id, volume_type, wait, wait_timeout,
                        image):
    """Ensure a Rackspace Cloud Block Storage volume is present or absent.

    For state 'present', creates the volume (optionally bootable from
    *image*) when it does not already exist, optionally waits for the build
    and exits with the resulting volume facts.  For state 'absent', deletes
    the volume if it exists.  Always exits through module.exit_json /
    module.fail_json.
    """
    changed = False
    volume = None
    instance = {}

    cbs = pyrax.cloud_blockstorage

    if cbs is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if image:
        # pyrax<1.9.3 did not have support for specifying an image when
        # creating a volume which is required for bootable volumes
        if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
            module.fail_json(msg='Creating a bootable volume requires '
                                 'pyrax>=1.9.3')
        image = rax_find_image(module, pyrax, image)

    volume = rax_find_volume(module, pyrax, name)

    if state == 'present':
        if not volume:
            kwargs = dict()
            if image:
                kwargs['image'] = image
            try:
                volume = cbs.create(name, size=size, volume_type=volume_type,
                                    description=description,
                                    metadata=meta,
                                    snapshot_id=snapshot_id, **kwargs)
                changed = True
            except Exception as e:
                # BUGFIX: 'e.message' does not exist on Python 3 and would
                # raise AttributeError, masking the real error; format the
                # exception itself instead.
                module.fail_json(msg='%s' % e)
            else:
                if wait:
                    # Poll the build at 5-second intervals until wait_timeout
                    # is spent.
                    attempts = wait_timeout // 5
                    pyrax.utils.wait_for_build(volume, interval=5,
                                               attempts=attempts)

        # Refresh the volume object before reporting its state.
        volume.get()
        instance = rax_to_dict(volume)

        result = dict(changed=changed, volume=instance)

        if volume.status == 'error':
            result['msg'] = '%s failed to build' % volume.id
        elif wait and volume.status not in VOLUME_STATUS:
            result['msg'] = 'Timeout waiting on %s' % volume.id

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    elif state == 'absent':
        if volume:
            instance = rax_to_dict(volume)
            try:
                volume.delete()
                changed = True
            except Exception as e:
                # BUGFIX: same 'e.message' Python 3 incompatibility as above.
                module.fail_json(msg='%s' % e)

    module.exit_json(changed=changed, volume=instance)
Exemplo n.º 27
0
def main():
    """Ansible module entry point: publish a single message to an MQTT broker."""
    # Map user-facing TLS version names to ssl protocol constants.  Older
    # Python builds may lack one of these constants, so probe with getattr
    # and only register the ones that exist (tlsv1.2 first, then tlsv1.1,
    # matching the original insertion order).
    tls_map = {}
    for choice, constant in (('tlsv1.2', 'PROTOCOL_TLSv1_2'),
                             ('tlsv1.1', 'PROTOCOL_TLSv1_1')):
        try:
            tls_map[choice] = getattr(ssl, constant)
        except AttributeError:
            pass

    module = AnsibleModule(
        argument_spec=dict(
            server=dict(default='localhost'),
            port=dict(default=1883, type='int'),
            topic=dict(required=True),
            payload=dict(required=True),
            client_id=dict(default=None),
            qos=dict(default="0", choices=["0", "1", "2"]),
            retain=dict(default=False, type='bool'),
            username=dict(default=None),
            password=dict(default=None, no_log=True),
            ca_cert=dict(default=None, type='path', aliases=['ca_certs']),
            client_cert=dict(default=None, type='path', aliases=['certfile']),
            client_key=dict(default=None, type='path', aliases=['keyfile']),
            tls_version=dict(default=None, choices=['tlsv1.1', 'tlsv1.2'])
        ),
        supports_check_mode=True
    )

    if not HAS_PAHOMQTT:
        module.fail_json(msg=missing_required_lib('paho-mqtt'), exception=PAHOMQTT_IMP_ERR)

    # AnsibleModule guarantees every declared key is present, so index the
    # params dict directly.
    params = module.params
    server = params['server']
    port = params['port']
    topic = params['topic']
    payload = params['payload']
    client_id = params['client_id']
    qos = int(params['qos'])
    retain = params['retain']
    username = params['username']
    password = params['password']
    ca_certs = params['ca_cert']
    certfile = params['client_cert']
    keyfile = params['client_key']
    tls_version = params['tls_version']

    # Derive a host+pid based client id when none was supplied.
    if client_id is None:
        client_id = "%s_%s" % (socket.getfqdn(), os.getpid())

    # The literal string 'None' is treated as "no payload".
    if payload == 'None':
        payload = None

    auth = {'username': username, 'password': password} if username is not None else None

    tls = None
    if ca_certs is not None:
        if tls_version:
            tls_version = tls_map.get(tls_version, ssl.PROTOCOL_SSLv23)
        elif LooseVersion(platform.python_version()) <= LooseVersion("3.5.2"):
            # Specifying `None` on later versions of python seems sufficient to
            # instruct python to autonegotiate the SSL/TLS connection. On versions
            # 3.5.2 and lower though we need to specify the version.
            #
            # Note that this is an alias for PROTOCOL_TLS, but PROTOCOL_TLS was
            # not available until 3.5.3.
            tls_version = ssl.PROTOCOL_SSLv23
        tls = dict(ca_certs=ca_certs,
                   certfile=certfile,
                   keyfile=keyfile,
                   tls_version=tls_version)

    try:
        mqtt.single(
            topic,
            payload,
            qos=qos,
            retain=retain,
            client_id=client_id,
            hostname=server,
            port=port,
            auth=auth,
            tls=tls
        )
    except Exception as e:
        module.fail_json(
            msg="unable to publish to MQTT broker %s" % to_native(e),
            exception=traceback.format_exc()
        )

    module.exit_json(changed=False, topic=topic)
Exemplo n.º 28
0
def main():
    """Ansible module entry point: manage a dpkg file diversion.

    Ensures the diversion for 'path' matches the requested state/holder/
    divert target, optionally renaming files on disk.  Because dpkg-divert
    cannot modify an existing diversion in place, updates are performed as
    remove-then-add.  Exits through module.exit_json / module.fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(required=True, type='path'),
            state=dict(required=False,
                       type='str',
                       default='present',
                       choices=['absent', 'present']),
            holder=dict(required=False, type='str'),
            divert=dict(required=False, type='path'),
            rename=dict(required=False, type='bool', default=False),
            force=dict(required=False, type='bool', default=False),
        ),
        supports_check_mode=True,
    )

    path = module.params['path']
    state = module.params['state']
    holder = module.params['holder']
    divert = module.params['divert']
    rename = module.params['rename']
    force = module.params['force']

    # The diversion we want to end up with; compared against the actual
    # state afterwards to decide success.
    diversion_wanted = dict(path=path, state=state)
    changed = False

    DPKG_DIVERT = module.get_bin_path('dpkg-divert', required=True)
    MAINCOMMAND = [DPKG_DIVERT]

    # Option --listpackage is needed and comes with 1.15.0
    rc, stdout, stderr = module.run_command([DPKG_DIVERT, '--version'],
                                            check_rc=True)
    # Pick the single dotted-number token out of the first line of
    # `dpkg-divert --version` output.
    [current_version] = [
        x for x in stdout.splitlines()[0].split()
        if re.match('^[0-9]+[.][0-9]', x)
    ]
    if LooseVersion(current_version) < LooseVersion("1.15.0"):
        module.fail_json(msg="Unsupported dpkg version (<1.15.0).")
    # --no-rename exists only from dpkg 1.19.1 onward.
    no_rename_is_supported = (LooseVersion(current_version) >=
                              LooseVersion("1.19.1"))

    b_path = to_bytes(path, errors='surrogate_or_strict')
    path_exists = os.path.exists(b_path)
    # Used for things not doable with a single dpkg-divert command (as forced
    # renaming of files, and diversion's 'holder' or 'divert' updates).
    target_exists = False
    truename_exists = False

    diversion_before = diversion_state(module, DPKG_DIVERT, path)
    if diversion_before['state'] == 'present':
        b_divert = to_bytes(diversion_before['divert'],
                            errors='surrogate_or_strict')
        truename_exists = os.path.exists(b_divert)

    # Append options as requested in the task parameters, but ignore some of
    # them when removing the diversion.
    if rename:
        MAINCOMMAND.append('--rename')
    elif no_rename_is_supported:
        MAINCOMMAND.append('--no-rename')

    if state == 'present':
        if holder and holder != 'LOCAL':
            MAINCOMMAND.extend(['--package', holder])
            diversion_wanted['holder'] = holder
        else:
            MAINCOMMAND.append('--local')
            diversion_wanted['holder'] = 'LOCAL'

        if divert:
            MAINCOMMAND.extend(['--divert', divert])
            target = divert
        else:
            # dpkg-divert's default divert target when none is given.
            target = '%s.distrib' % path

        MAINCOMMAND.extend(['--add', path])
        diversion_wanted['divert'] = target
        b_target = to_bytes(target, errors='surrogate_or_strict')
        target_exists = os.path.exists(b_target)

    else:
        MAINCOMMAND.extend(['--remove', path])
        diversion_wanted['divert'] = None
        diversion_wanted['holder'] = None

    # Start to populate the returned objects.
    diversion = diversion_before.copy()
    maincommand = ' '.join(MAINCOMMAND)
    commands = [maincommand]

    # In check mode (or when nothing would change) run dpkg-divert with
    # --test so it only simulates the operation.
    if module.check_mode or diversion_wanted == diversion_before:
        MAINCOMMAND.insert(1, '--test')
        diversion_after = diversion_wanted

    # Just try and see
    rc, stdout, stderr = module.run_command(MAINCOMMAND)

    if rc == 0:
        messages = [stdout.rstrip()]

    # else... cases of failure with dpkg-divert are:
    # - The diversion does not belong to the same package (or LOCAL)
    # - The divert filename is not the same (e.g. path.distrib != path.divert)
    # - The renaming is forbidden by dpkg-divert (i.e. both the file and the
    #   diverted file exist)

    elif state != diversion_before['state']:
        # There should be no case with 'divert' and 'holder' when creating the
        # diversion from none, and they're ignored when removing the diversion.
        # So this is all about renaming...
        if rename and path_exists and ((state == 'absent' and truename_exists)
                                       or
                                       (state == 'present' and target_exists)):
            if not force:
                msg = "Set 'force' param to True to force renaming of files."
                module.fail_json(changed=changed,
                                 cmd=maincommand,
                                 rc=rc,
                                 msg=msg,
                                 stderr=stderr,
                                 stdout=stdout,
                                 diversion=diversion)
        else:
            msg = "Unexpected error while changing state of the diversion."
            module.fail_json(changed=changed,
                             cmd=maincommand,
                             rc=rc,
                             msg=msg,
                             stderr=stderr,
                             stdout=stdout,
                             diversion=diversion)

        # force=True: remove the file blocking the rename, then retry.
        to_remove = path
        if state == 'present':
            to_remove = target

        if not module.check_mode:
            try:
                b_remove = to_bytes(to_remove, errors='surrogate_or_strict')
                os.unlink(b_remove)
            except OSError as e:
                msg = 'Failed to remove %s: %s' % (to_remove, to_native(e))
                module.fail_json(changed=changed,
                                 cmd=maincommand,
                                 rc=rc,
                                 msg=msg,
                                 stderr=stderr,
                                 stdout=stdout,
                                 diversion=diversion)
            rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)

        messages = [stdout.rstrip()]

    # The situation is that we want to modify the settings (holder or divert)
    # of an existing diversion. dpkg-divert does not handle this, and we have
    # to remove the existing diversion first, and then set a new one.
    else:
        RMDIVERSION = [DPKG_DIVERT, '--remove', path]
        if no_rename_is_supported:
            RMDIVERSION.insert(1, '--no-rename')
        rmdiversion = ' '.join(RMDIVERSION)

        if module.check_mode:
            RMDIVERSION.insert(1, '--test')

        # When updating an existing diversion, never let dpkg-divert rename
        # files itself: the optional rename is handled explicitly below.
        if rename:
            MAINCOMMAND.remove('--rename')
            if no_rename_is_supported:
                MAINCOMMAND.insert(1, '--no-rename')
            maincommand = ' '.join(MAINCOMMAND)

        commands = [rmdiversion, maincommand]
        rc, rmdout, rmderr = module.run_command(RMDIVERSION, check_rc=True)

        if module.check_mode:
            messages = [rmdout.rstrip(), 'Running in check mode']
        else:
            rc, stdout, stderr = module.run_command(MAINCOMMAND, check_rc=True)
            messages = [rmdout.rstrip(), stdout.rstrip()]

            # Avoid if possible to orphan files (i.e. to dereference them in diversion
            # database but let them in place), but do not make renaming issues fatal.
            # BTW, this module is not about state of files involved in the diversion.
            old = diversion_before['divert']
            new = diversion_wanted['divert']
            if new != old:
                b_old = to_bytes(old, errors='surrogate_or_strict')
                b_new = to_bytes(new, errors='surrogate_or_strict')
                if os.path.exists(b_old) and not os.path.exists(b_new):
                    try:
                        os.rename(b_old, b_new)
                    except OSError as e:
                        # Best-effort rename: failures are deliberately ignored.
                        pass

    if not module.check_mode:
        diversion_after = diversion_state(module, DPKG_DIVERT, path)

    diversion = diversion_after.copy()
    diff = dict()
    if module._diff:
        diff['before'] = diversion_before
        diff['after'] = diversion_after

    if diversion_after != diversion_before:
        changed = True

    # Success only when the observed final state matches what was requested.
    if diversion_after == diversion_wanted:
        module.exit_json(changed=changed,
                         diversion=diversion,
                         commands=commands,
                         messages=messages,
                         diff=diff)
    else:
        msg = "Unexpected error: see stdout and stderr for details."
        module.fail_json(changed=changed,
                         cmd=maincommand,
                         rc=rc,
                         msg=msg,
                         stderr=stderr,
                         stdout=stdout,
                         diversion=diversion)
Exemplo n.º 29
0
def main():
    """Entry point for the Proxmox LXC/OpenVZ container module.

    Manages container lifecycle through the Proxmox API: create from an
    ostemplate, clone an existing container, start, stop, restart, or
    remove one, according to the ``state`` parameter.

    NOTE(review): relies on helpers defined elsewhere in this file
    (proxmox_auth_argument_spec, ProxmoxLxcAnsible,
    ansible_to_proxmox_bool) and exits via module.exit_json /
    module.fail_json in every branch.
    """
    module_args = proxmox_auth_argument_spec()
    proxmox_args = dict(
        vmid=dict(type='int', required=False),
        node=dict(),
        pool=dict(),
        password=dict(no_log=True),
        hostname=dict(),
        ostemplate=dict(),
        disk=dict(type='str'),
        cores=dict(type='int'),
        cpus=dict(type='int'),
        memory=dict(type='int'),
        swap=dict(type='int'),
        netif=dict(type='dict'),
        mounts=dict(type='dict'),
        ip_address=dict(),
        onboot=dict(type='bool'),
        features=dict(type='list', elements='str'),
        storage=dict(default='local'),
        cpuunits=dict(type='int'),
        nameserver=dict(),
        searchdomain=dict(),
        timeout=dict(type='int', default=30),
        force=dict(type='bool', default=False),
        purge=dict(type='bool', default=False),
        state=dict(
            default='present',
            choices=['present', 'absent', 'stopped', 'started', 'restarted']),
        pubkey=dict(type='str', default=None),
        unprivileged=dict(type='bool', default=False),
        description=dict(type='str'),
        hookscript=dict(type='str'),
        proxmox_default_behavior=dict(type='str',
                                      default='no_defaults',
                                      choices=['compatibility',
                                               'no_defaults']),
        clone=dict(type='int'),
        clone_type=dict(default='opportunistic',
                        choices=['full', 'linked', 'opportunistic']),
    )
    module_args.update(proxmox_args)

    module = AnsibleModule(
        argument_spec=module_args,
        required_if=[
            ('state', 'present', ['node', 'hostname']),
            (
                'state', 'present', ('clone', 'ostemplate'), True
            ),  # Require one of clone and ostemplate. Together with mutually_exclusive this ensures that we
            # either clone a container or create a new one from a template file.
        ],
        required_together=[('api_token_id', 'api_token_secret')],
        required_one_of=[('api_password', 'api_token_id')],
        mutually_exclusive=[
            ('clone', 'ostemplate')
        ],  # Creating a new container is done either by cloning an existing one, or based on a template.
    )

    proxmox = ProxmoxLxcAnsible(module)

    # Proxmox < 4.0 exposes containers under the 'openvz' API path;
    # 4.0 and later use 'lxc'. VZ_TYPE is consumed by helpers file-wide.
    global VZ_TYPE
    VZ_TYPE = 'openvz' if proxmox.version() < LooseVersion('4.0') else 'lxc'

    state = module.params['state']
    vmid = module.params['vmid']
    node = module.params['node']
    disk = module.params['disk']
    cpus = module.params['cpus']
    memory = module.params['memory']
    swap = module.params['swap']
    storage = module.params['storage']
    hostname = module.params['hostname']
    if module.params['ostemplate'] is not None:
        # ostemplate is "<storage>:<template-path>"; keep the storage part
        # for the content check below.
        template_store = module.params['ostemplate'].split(":")[0]
    timeout = module.params['timeout']
    clone = module.params['clone']

    # 'compatibility' restores the implicit option defaults that older
    # releases of this module applied when the user left them unset.
    if module.params['proxmox_default_behavior'] == 'compatibility':
        old_default_values = dict(
            disk="3",
            cores=1,
            cpus=1,
            memory=512,
            swap=0,
            onboot=False,
            cpuunits=1000,
        )
        for param, value in old_default_values.items():
            if module.params[param] is None:
                module.params[param] = value

    # If vmid not set get the Next VM id from ProxmoxAPI
    # If hostname is set get the VM id from ProxmoxAPI
    if not vmid and state == 'present':
        vmid = proxmox.get_nextvmid()
    elif not vmid and hostname:
        vmid = proxmox.get_vmid(hostname, choose_first_if_multiple=True)
    elif not vmid:
        module.exit_json(
            changed=False,
            msg="Vmid could not be fetched for the following action: %s" %
            state)

    # Create a new container
    if state == 'present' and clone is None:
        try:
            if proxmox.get_vm(
                    vmid, ignore_missing=True) and not module.params['force']:
                module.exit_json(changed=False,
                                 msg="VM with vmid = %s is already exists" %
                                 vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if (not module.params['vmid']
                    and proxmox.get_vmid(hostname,
                                         ignore_missing=True,
                                         choose_first_if_multiple=True)
                    and not module.params['force']):
                vmid = proxmox.get_vmid(hostname,
                                        choose_first_if_multiple=True)
                module.exit_json(
                    changed=False,
                    msg=
                    "VM with hostname %s already exists and has ID number %s" %
                    (hostname, vmid))
            elif not proxmox.get_node(node):
                module.fail_json(msg="node '%s' not exists in cluster" % node)
            elif not proxmox.content_check(node, module.params['ostemplate'],
                                           template_store):
                module.fail_json(
                    msg="ostemplate '%s' not exists on node %s and storage %s"
                    % (module.params['ostemplate'], node, template_store))
        except Exception as e:
            module.fail_json(
                msg=
                "Pre-creation checks of {VZ_TYPE} VM {vmid} failed with exception: {e}"
                .format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))

        try:
            proxmox.create_instance(
                vmid,
                node,
                disk,
                storage,
                cpus,
                memory,
                swap,
                timeout,
                clone,
                cores=module.params['cores'],
                pool=module.params['pool'],
                password=module.params['password'],
                hostname=module.params['hostname'],
                ostemplate=module.params['ostemplate'],
                netif=module.params['netif'],
                mounts=module.params['mounts'],
                ip_address=module.params['ip_address'],
                onboot=ansible_to_proxmox_bool(module.params['onboot']),
                cpuunits=module.params['cpuunits'],
                nameserver=module.params['nameserver'],
                searchdomain=module.params['searchdomain'],
                force=ansible_to_proxmox_bool(module.params['force']),
                pubkey=module.params['pubkey'],
                features=",".join(module.params['features'])
                if module.params['features'] is not None else None,
                unprivileged=ansible_to_proxmox_bool(
                    module.params['unprivileged']),
                description=module.params['description'],
                hookscript=module.params['hookscript'])

            module.exit_json(changed=True,
                             msg="Deployed VM %s from template %s" %
                             (vmid, module.params['ostemplate']))
        except Exception as e:
            module.fail_json(
                msg="Creation of %s VM %s failed with exception: %s" %
                (VZ_TYPE, vmid, e))

    # Clone a container
    elif state == 'present' and clone is not None:
        try:
            if proxmox.get_vm(
                    vmid, ignore_missing=True) and not module.params['force']:
                module.exit_json(changed=False,
                                 msg="VM with vmid = %s is already exists" %
                                 vmid)
            # If no vmid was passed, there cannot be another VM named 'hostname'
            if (not module.params['vmid']
                    and proxmox.get_vmid(hostname,
                                         ignore_missing=True,
                                         choose_first_if_multiple=True)
                    and not module.params['force']):
                vmid = proxmox.get_vmid(hostname,
                                        choose_first_if_multiple=True)
                module.exit_json(
                    changed=False,
                    msg=
                    "VM with hostname %s already exists and has ID number %s" %
                    (hostname, vmid))
            if not proxmox.get_vm(clone, ignore_missing=True):
                module.exit_json(changed=False,
                                 msg="Container to be cloned does not exist")
        except Exception as e:
            module.fail_json(
                msg=
                "Pre-clone checks of {VZ_TYPE} VM {vmid} failed with exception: {e}"
                .format(VZ_TYPE=VZ_TYPE, vmid=vmid, e=e))

        try:
            proxmox.create_instance(vmid, node, disk, storage, cpus, memory,
                                    swap, timeout, clone)

            module.exit_json(changed=True,
                             msg="Cloned VM %s from %s" % (vmid, clone))
        except Exception as e:
            module.fail_json(msg="Cloning %s VM %s failed with exception: %s" %
                             (VZ_TYPE, vmid, e))

    elif state == 'started':
        try:
            vm = proxmox.get_vm(vmid)
            # Idempotence: do nothing if the container already reports 'running'.
            if getattr(
                    proxmox.proxmox_api.nodes(vm['node']),
                    VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
                module.exit_json(changed=False,
                                 msg="VM %s is already running" % vmid)

            if proxmox.start_instance(vm, vmid, timeout):
                module.exit_json(changed=True, msg="VM %s started" % vmid)
        except Exception as e:
            module.fail_json(
                msg="starting of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'stopped':
        try:
            vm = proxmox.get_vm(vmid)

            # A 'mounted' container is already shut down but its filesystem
            # is still mounted; unmounting requires the force option.
            if getattr(
                    proxmox.proxmox_api.nodes(vm['node']),
                    VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
                if module.params['force']:
                    if proxmox.umount_instance(vm, vmid, timeout):
                        module.exit_json(changed=True,
                                         msg="VM %s is shutting down" % vmid)
                else:
                    module.exit_json(
                        changed=False,
                        msg=("VM %s is already shutdown, but mounted. "
                             "You can use force option to umount it.") % vmid)

            if getattr(
                    proxmox.proxmox_api.nodes(vm['node']),
                    VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
                module.exit_json(changed=False,
                                 msg="VM %s is already shutdown" % vmid)

            if proxmox.stop_instance(vm,
                                     vmid,
                                     timeout,
                                     force=module.params['force']):
                module.exit_json(changed=True,
                                 msg="VM %s is shutting down" % vmid)
        except Exception as e:
            module.fail_json(
                msg="stopping of VM %s failed with exception: %s" % (vmid, e))

    elif state == 'restarted':
        try:
            vm = proxmox.get_vm(vmid)

            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']),
                                VZ_TYPE)(vmid).status.current.get()['status']
            if vm_status in ['stopped', 'mounted']:
                module.exit_json(changed=False,
                                 msg="VM %s is not running" % vmid)

            # Restart = stop followed by start; only report changed if both succeed.
            if (proxmox.stop_instance(
                    vm, vmid, timeout, force=module.params['force'])
                    and proxmox.start_instance(vm, vmid, timeout)):
                module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
        except Exception as e:
            module.fail_json(
                msg="restarting of VM %s failed with exception: %s" %
                (vmid, e))

    elif state == 'absent':
        try:
            vm = proxmox.get_vm(vmid, ignore_missing=True)
            if not vm:
                module.exit_json(changed=False,
                                 msg="VM %s does not exist" % vmid)

            vm_status = getattr(proxmox.proxmox_api.nodes(vm['node']),
                                VZ_TYPE)(vmid).status.current.get()['status']
            # Refuse to delete running/mounted containers; user must stop first.
            if vm_status == 'running':
                module.exit_json(
                    changed=False,
                    msg="VM %s is running. Stop it before deletion." % vmid)

            if vm_status == 'mounted':
                module.exit_json(
                    changed=False,
                    msg=
                    "VM %s is mounted. Stop it with force option before deletion."
                    % vmid)

            delete_params = {}

            if module.params['purge']:
                delete_params['purge'] = 1

            taskid = getattr(proxmox.proxmox_api.nodes(vm['node']),
                             VZ_TYPE).delete(vmid, **delete_params)

            # Poll the deletion task once per second until it succeeds or
            # `timeout` seconds have elapsed.
            while timeout:
                task_status = proxmox.proxmox_api.nodes(
                    vm['node']).tasks(taskid).status.get()
                if (task_status['status'] == 'stopped'
                        and task_status['exitstatus'] == 'OK'):
                    module.exit_json(changed=True, msg="VM %s removed" % vmid)
                timeout -= 1
                if timeout == 0:
                    module.fail_json(
                        msg=
                        'Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                        % proxmox.proxmox_api.nodes(vm['node']).tasks(
                            taskid).log.get()[:1])

                time.sleep(1)
        except Exception as e:
            module.fail_json(
                msg="deletion of VM %s failed with exception: %s" %
                (vmid, to_native(e)))
Exemplo n.º 30
0
def post_sendgrid_api(module,
                      username,
                      password,
                      from_address,
                      to_addresses,
                      subject,
                      body,
                      api_key=None,
                      cc=None,
                      bcc=None,
                      attachments=None,
                      html_body=False,
                      from_name=None,
                      headers=None):
    """Send an email through SendGrid's (legacy) v2 mail API.

    Uses the ``sendgrid`` Python library when installed; otherwise falls
    back to a direct POST against the v2 HTTP endpoint via ``fetch_url``.
    The cc/bcc/attachments/headers/from_name options are only honored on
    the library path.

    :param module: AnsibleModule instance (used for HTTP and fail_json)
    :param username: SendGrid API username
    :param password: SendGrid API password
    :param from_address: sender email address
    :param to_addresses: iterable of recipient addresses
    :param subject: message subject
    :param body: message body (plain text, or HTML when html_body is True)
    :param api_key: optional API key, used instead of username/password
        on the library path
    :param cc: optional iterable of CC addresses
    :param bcc: optional iterable of BCC addresses
    :param attachments: optional iterable of file paths to attach
    :param html_body: send the body as HTML instead of plain text
    :param from_name: optional display name for the sender
    :param headers: optional dict of extra message headers
    :return: the ``(response, info)`` tuple from fetch_url on the HTTP
        path, or the result of ``sg.send(message)`` on the library path
    """
    if not HAS_SENDGRID:
        # Fallback: talk to the legacy v2 HTTP endpoint directly.
        SENDGRID_URI = "https://api.sendgrid.com/api/mail.send.json"
        AGENT = "Ansible"
        data = {
            'api_user': username,
            'api_key': password,
            'from': from_address,
            'subject': subject,
            'text': body
        }
        encoded_data = urlencode(data)
        to_addresses_api = ''
        for recipient in to_addresses:
            # Bug fix: interpolate as text. Interpolating a to_bytes()
            # result into a str produced a literal "b'...'" in the
            # request body on Python 3.
            recipient = to_native(recipient, errors='surrogate_or_strict')
            to_addresses_api += '&to[]=%s' % recipient
        encoded_data += to_addresses_api

        # Renamed from `headers` to avoid shadowing the parameter (which
        # is only meaningful on the library path anyway).
        request_headers = {
            'User-Agent': AGENT,
            'Content-type': 'application/x-www-form-urlencoded',
            'Accept': 'application/json'
        }
        return fetch_url(module,
                         SENDGRID_URI,
                         data=encoded_data,
                         headers=request_headers,
                         method='POST')
    else:
        # Remove this check when adding Sendgrid API v3 support
        if LooseVersion(sendgrid.version.__version__) > LooseVersion("1.6.22"):
            module.fail_json(
                msg=
                "Please install sendgrid==1.6.22 or lower since module uses Sendgrid V2 APIs."
            )

        if api_key:
            sg = sendgrid.SendGridClient(api_key)
        else:
            sg = sendgrid.SendGridClient(username, password)

        message = sendgrid.Mail()
        message.set_subject(subject)

        for recip in to_addresses:
            message.add_to(recip)

        if cc:
            for recip in cc:
                message.add_cc(recip)
        if bcc:
            for recip in bcc:
                message.add_bcc(recip)

        if headers:
            message.set_headers(headers)

        if attachments:
            for f in attachments:
                name = os.path.basename(f)
                message.add_attachment(name, f)

        if from_name:
            # Bug fix: the closing '>' was previously a '.', producing a
            # malformed "Name <addr." sender mailbox instead of the
            # RFC 5322 "Name <addr>" form.
            message.set_from('%s <%s>' % (from_name, from_address))
        else:
            message.set_from(from_address)

        if html_body:
            message.set_html(body)
        else:
            message.set_text(body)

        return sg.send(message)