Code example #1
    def get_storage_pool(self, storage_pool_name):
        # global ifilter
        self.debug("fetching storage pools")
        # map the storage pool name to its id
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                                 headers=dict(Accept="application/json"), url_username=self.api_usr,
                                 url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            rc = err.args[0]
            if rc == 404 and self.state == 'absent':
                self.module.exit_json(
                    msg="Storage pool [%s] did not exist." % (self.name))
            else:
                err = get_exception()
                self.module.exit_json(
                    msg="Failed to get storage pools. Array id [%s].  Error[%s]. State[%s]. RC[%s]." %
                        (self.ssid, str(err), self.state, rc))

        self.debug("searching for storage pool '%s'", storage_pool_name)

        pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)

        if pool_detail:
            found = 'found'
        else:
            found = 'not found'
        self.debug(found)

        return pool_detail
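Every snippet on this page calls get_exception() as the first statement inside an except block. In Ansible 2.x that helper lives in ansible.module_utils.pycompat24 and simply returns the exception currently being handled, which keeps module code importable on Python 2.4 (where "except Exception as e" is a syntax error). A minimal sketch of the helper these examples assume:

import sys

def get_exception():
    # Return the exception currently being handled; call this first
    # inside an except block, before anything else can raise.
    return sys.exc_info()[1]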
Code example #2
    def save(self):
        if len(self.repofile.sections()):
            # Write data into the file
            try:
                fd = open(self.params['dest'], 'wb')
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot open repo file %s." % self.params['dest'],
                    details=str(e))

            self.repofile.write(fd)

            try:
                fd.close()
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot write repo file %s." % self.params['dest'],
                    details=str(e))
        else:
            # Remove the file if there are no repos
            try:
                os.remove(self.params['dest'])
            except OSError:
                e = get_exception()
                self.module.fail_json(
                    msg=(
                        "Cannot remove empty repo file %s." %
                        self.params['dest']),
                    details=str(e))
Code example #3
def get_host_and_group_map(module, ssid, api_url, user, pwd):
    mapping = dict(host=dict(), group=dict())

    hostgroups = 'storage-systems/%s/host-groups' % ssid
    groups_url = api_url + hostgroups
    try:
        hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
    except:
        err = get_exception()
        module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))

    for group in hg_data:
        mapping['group'][group['name']] = group['id']

    hosts = 'storage-systems/%s/hosts' % ssid
    hosts_url = api_url + hosts
    try:
        h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
    except:
        err = get_exception()
        module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))

    for host in h_data:
        mapping['host'][host['name']] = host['id']

    return mapping
Code example #4
def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except:
        err = get_exception()
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s].  AMG Id [%s].  Error [%s]" % (ssid, amg_id, str(err)))

    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except:
        err = get_exception()
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. " +
                "Id [%s].  AMG Id [%s].  Error [%s]" % (ssid, amg_id, str(err)))

    # Here we wait for the role reversal to complete
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except:
                err = get_exception()
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. " +
                        "Id [%s].  AMG Id [%s].  Error [%s]" % (ssid, amg_id, str(err)))
    return status
Code example #5
def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
    all_groups = "storage-systems/%s/host-groups" % ssid
    g_url = api_url + all_groups
    try:
        g_rc, g_data = request(g_url, method="GET", headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]."
            % (group_name, ssid, str(err))
        )

    all_hosts = "storage-systems/%s/hosts" % ssid
    h_url = api_url + all_hosts
    try:
        h_rc, h_data = request(h_url, method="GET", headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]."
            % (group_name, ssid, str(err))
        )

    hosts_in_group = []

    for hg in g_data:
        if hg["name"] == group_name:
            clusterRef = hg["clusterRef"]

    for host in h_data:
        if host["clusterRef"] == clusterRef:
            hosts_in_group.append(host["name"])

    return hosts_in_group
Code example #6
    def _write_file(self, f, data):
        # Store the plugin into a temp file and then move it
        tmp_fd, tmp_f = tempfile.mkstemp()
        os.close(tmp_fd)  # mkstemp() returns (fd, path); only the path is needed below

        try:
            fd = open(tmp_f, 'wb')
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg='Cannot open the temporary plugin file %s.' % tmp_f,
                details=str(e))

        if isinstance(data, str):
            d = data
        else:
            d = data.read()

        fd.write(d)

        try:
            fd.close()
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg='Cannot close the temporary plugin file %s.' % tmp_f,
                details=str(e))

        # Move the file onto the right place
        self.module.atomic_move(tmp_f, f)
Code example #7
File: netapp_e_volume.py  Project: ernstp/ansible
    def get_volume(self, volume_name):
        self.debug('fetching volumes')
        # fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
        try:
            (rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                    headers=dict(Accept="application/json"), url_username=self.api_usr,
                                    url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of standard/thick volumes.  Array Id [%s]. Error[%s]." % (self.ssid,
                                                                                                     str(err)))

        try:
            self.debug('fetching thin-volumes')
            (rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                     headers=dict(Accept="application/json"), url_username=self.api_usr,
                                     url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of thin volumes.  Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

        volumes.extend(thinvols)

        self.debug("searching for volume '%s'", volume_name)
        volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)

        if volume_detail:
            self.debug('found')
        else:
            self.debug('not found')

        return volume_detail
Code example #8
File: seport.py  Project: 2ndQuadrant/ansible
def semanage_port_add(module, ports, proto, setype, do_reload, serange='s0', sestore=''):
    """ Add SELinux port type definition to the policy.

    :type module: AnsibleModule
    :param module: Ansible module

    :type ports: list
    :param ports: List of ports and port ranges to add (e.g. ["8080", "8080-9090"])

    :type proto: str
    :param proto: Protocol ('tcp' or 'udp')

    :type setype: str
    :param setype: SELinux type

    :type do_reload: bool
    :param do_reload: Whether to reload SELinux policy after commit

    :type serange: str
    :param serange: SELinux MLS/MCS range (defaults to 's0')

    :type sestore: str
    :param sestore: SELinux store

    :rtype: bool
    :return: True if the policy was changed, otherwise False
    """
    try:
        seport = seobject.portRecords(sestore)
        seport.set_reload(do_reload)
        change = False
        ports_by_type = semanage_port_get_ports(seport, setype, proto)
        for port in ports:
            if port not in ports_by_type:
                change = True
                port_type = semanage_port_get_type(seport, port, proto)
                if port_type is None and not module.check_mode:
                    seport.add(port, proto, serange, setype)
                elif port_type is not None and not module.check_mode:
                    seport.modify(port, proto, serange, setype)

    except (ValueError, IOError, KeyError, OSError, RuntimeError):
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, str(e)))

    return change
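For context, a hedged sketch of how a seport-style module might call semanage_port_add() from its main(); the parameter names (ports, proto, setype, reload) are assumptions for illustration, not taken from this excerpt:

ports = module.params['ports']                     # e.g. ["8080", "8080-9090"]
changed = semanage_port_add(module, ports,
                            module.params['proto'],    # 'tcp' or 'udp'
                            module.params['setype'],
                            do_reload=module.params['reload'])
module.exit_json(changed=changed, ports=ports, proto=module.params['proto'])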
Code example #9
def main():
    module = AnsibleModule(
        argument_spec=dict(
            domain=dict(aliases=['name'], required=True),
            store=dict(required=False, default=''),
            permissive=dict(type='bool', required=True),
            no_reload=dict(type='bool', required=False, default=False),
        ),
        supports_check_mode=True
    )

    # global vars
    changed = False
    store = module.params['store']
    permissive = module.params['permissive']
    domain = module.params['domain']
    no_reload = module.params['no_reload']

    if not HAVE_SEOBJECT:
        module.fail_json(changed=False, msg="policycoreutils-python required for this module")

    try:
        permissive_domains = seobject.permissiveRecords(store)
    except ValueError:
        e = get_exception()
        module.fail_json(domain=domain, msg=str(e))

    # not supported on EL 6
    if 'set_reload' in dir(permissive_domains):
        permissive_domains.set_reload(not no_reload)

    try:
        all_domains = permissive_domains.get_all()
    except ValueError:
        e = get_exception()
        module.fail_json(domain=domain, msg=str(e))

    if permissive:
        if domain not in all_domains:
            if not module.check_mode:
                try:
                    permissive_domains.add(domain)
                except ValueError:
                    e = get_exception()
                    module.fail_json(domain=domain, msg=str(e))
            changed = True
    else:
        if domain in all_domains:
            if not module.check_mode:
                try:
                    permissive_domains.delete(domain)
                except ValueError:
                    e = get_exception()
                    module.fail_json(domain=domain, msg=str(e))
            changed = True

    module.exit_json(changed=changed, store=store,
                     permissive=permissive, domain=domain)
Code example #10
File: vertica_facts.py  Project: 2ndQuadrant/ansible
def main():

    module = AnsibleModule(
        argument_spec=dict(
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            db=dict(default=None),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    db = ''
    if module.params['db']:
        db = module.params['db']

    try:
        dsn = (
            "Driver=Vertica;"
            "Server=%s;"
            "Port=%s;"
            "Database=%s;"
            "User=%s;"
            "Password=%s;"
            "ConnectionLoadBalance=%s"
            ) % (module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception:
        e = get_exception()
        module.fail_json(msg="Unable to connect to database: %s." % str(e))

    try:
        schema_facts = get_schema_facts(cursor)
        user_facts = get_user_facts(cursor)
        role_facts = get_role_facts(cursor)
        configuration_facts = get_configuration_facts(cursor)
        node_facts = get_node_facts(cursor)
        module.exit_json(changed=False,
            ansible_facts={'vertica_schemas': schema_facts,
                           'vertica_users': user_facts,
                           'vertica_roles': role_facts,
                           'vertica_configuration': configuration_facts,
                           'vertica_nodes': node_facts})
    except NotSupportedError:
        e = get_exception()
        module.fail_json(msg=str(e))
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
Code example #11
    def get_candidate_disks(self):
        self.debug("getting candidate disks...")

        # driveCapacityMin is broken on /drives POST. Per NetApp request we built our own
        # switch back to commented code below if it gets fixed
        # drives_req = dict(
        #     driveCount = self.criteria_drive_count,
        #     sizeUnit = 'mb',
        #     raidLevel = self.raid_level
        # )
        #
        # if self.criteria_drive_type:
        #     drives_req['driveType'] = self.criteria_drive_type
        # if self.criteria_disk_min_aggregate_size_mb:
        #     drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
        #
        # # TODO: this arg appears to be ignored, uncomment if it isn't
        # #if self.criteria_disk_min_size_gb:
        # #    drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
        # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers,
        #                            method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
        #
        # if rc == 204:
        #     self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')

        # disk_ids = [d['id'] for d in drives_resp]

        try:
            (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
                                        url_username=self.api_usr, url_password=self.api_pwd,
                                        validate_certs=self.validate_certs)
        except:
            err = get_exception()
            self.module.exit_json(
                msg="Failed to fetch disk drives. Array id [%s].  Error[%s]." % (self.ssid, str(err)))

        try:
            candidate_set = self.filter_drives(drives_resp,
                                               exact_drive_count=self.criteria_drive_count,
                                               drive_type=self.criteria_drive_type,
                                               min_drive_size=self.criteria_drive_min_size,
                                               raid_level=self.raid_level,
                                               size_unit=self.criteria_size_unit,
                                               min_total_capacity=self.criteria_min_usable_capacity,
                                               interface_type=self.criteria_drive_interface_type,
                                               fde_required=self.criteria_drive_require_fde
                                               )
        except:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))

        disk_ids = [d['id'] for d in candidate_set]

        return disk_ids
Code example #12
def main():
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(choices=['started', 'stopped', 'restarted', 'killed', 'reloaded', 'once']),
            enabled = dict(required=False, type='bool'),
            downed = dict(required=False, type='bool'),
            dist = dict(required=False, default='daemontools'),
            service_dir = dict(required=False, default='/service'),
            service_src = dict(required=False, default='/etc/service'),
        ),
        supports_check_mode=True,
    )

    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']

    svc = Svc(module)
    changed = False
    orig_state = svc.report()

    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change service link: %s" % str(e))

    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            getattr(svc,state[:-2])()

    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change downed file: %s" % (str(e)))

    module.exit_json(changed=changed, svc=svc.report())
Code example #13
File: uri.py  Project: 2ndQuadrant/ansible
def write_file(module, url, dest, content):
    # create a tempfile with some test content
    fd, tmpsrc = tempfile.mkstemp()
    f = open(tmpsrc, 'wb')
    try:
        f.write(content)
    except Exception:
        err = get_exception()
        os.remove(tmpsrc)
        module.fail_json(msg="failed to create temporary content file: %s" % str(err))
    f.close()

    checksum_src   = None
    checksum_dest  = None

    # raise an error if there is no tmpsrc file
    if not os.path.exists(tmpsrc):
        os.remove(tmpsrc)
        module.fail_json(msg="Source %s does not exist" % (tmpsrc))
    if not os.access(tmpsrc, os.R_OK):
        os.remove(tmpsrc)
        module.fail_json( msg="Source %s not readable" % (tmpsrc))
    checksum_src = module.sha1(tmpsrc)

    # check if there is no dest file
    if os.path.exists(dest):
        # raise an error if copy has no permission on dest
        if not os.access(dest, os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not writable" % (dest))
        if not os.access(dest, os.R_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination %s not readable" % (dest))
        checksum_dest = module.sha1(dest)
    else:
        if not os.access(os.path.dirname(dest), os.W_OK):
            os.remove(tmpsrc)
            module.fail_json(msg="Destination dir %s not writable" % (os.path.dirname(dest)))

    if checksum_src != checksum_dest:
        try:
            shutil.copyfile(tmpsrc, dest)
        except Exception:
            err = get_exception()
            os.remove(tmpsrc)
            module.fail_json(msg="failed to copy %s to %s: %s" % (tmpsrc, dest, str(err)))

    os.remove(tmpsrc)
Code example #14
File: apt.py  Project: likewg/DevOps
def download(module, deb):
    tempdir = os.path.dirname(__file__)
    package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1]))
    # When downloading a deb, how much of the deb to download before
    # saving to a tempfile (64k)
    BUFSIZE = 65536

    try:
        rsp, info = fetch_url(module, deb)
        f = open(package, 'w')
        # Read BUFSIZE (64k) at a time to save on RAM
        while True:
            data = rsp.read(BUFSIZE)

            if data == "":
                break # End of file, break while loop

            f.write(data)
        f.close()
        deb = package
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failure downloading %s, %s" % (deb, e))

    return deb
Code example #15
File: authorized_key.py  Project: likewg/DevOps
def writekeys(module, filename, keys):

    fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename))
    f = open(tmp_path,"w")
    try:
        for index, key in keys.items():
            try:
                (keyhash,type,options,comment) = key
                option_str = ""
                if options:
                    option_strings = []
                    for option_key, value in options.items():
                        if value is None:
                            option_strings.append("%s" % option_key)
                        else:
                            option_strings.append("%s=%s" % (option_key, value))
                    option_str = ",".join(option_strings)
                    option_str += " "
                key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment)
            except:
                key_line = key
            f.writelines(key_line)
    except IOError:
        e = get_exception()
        module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e)))
    f.close()
    module.atomic_move(tmp_path, filename)
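For reference, a hedged illustration of the keys structure writekeys() expects: the dict maps an index to either a parsed (keyhash, type, options, comment) tuple or, when parsing failed upstream, the raw line to write back verbatim. The values below are invented purely for illustration:

keys = {
    0: ("AAAAB3NzaC1yc2EAAAADAQABexamplekey", "ssh-rsa",
        {"from": "10.0.0.1", "no-pty": None}, "user@example.com"),
    1: "# lines that could not be parsed are written back unchanged",
}
writekeys(module, "/home/user/.ssh/authorized_keys", keys)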
Code example #16
File: netapp_e_auth.py  Project: 2ndQuadrant/ansible
def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
    set_pass = "******" % ssid
    url = api_url + set_pass

    if not current_password:
        current_password = ""

    post_body = json.dumps(
        dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))

    try:
        rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
                           ignore_errors=True)
    except:
        err = get_exception()
        module.fail_json(msg="Failed to set system password. Id [%s].  Error [%s]" % (ssid, str(err)))

    if rc == 422:
        post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
        try:
            rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
        except Exception:
            module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")

    update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)

    if int(rc) == 204:
        return update_data
    else:
        module.fail_json(msg="%s:%s" % (rc, data))
Code example #17
File: apt.py  Project: ernstp/ansible
def download(module, deb):
    tempdir = os.path.dirname(__file__)
    package = os.path.join(tempdir, str(deb.rsplit('/', 1)[1]))
    # When downloading a deb, how much of the deb to download before
    # saving to a tempfile (64k)
    BUFSIZE = 65536

    try:
        rsp, info = fetch_url(module, deb, method='GET')
        if info['status'] != 200:
            module.fail_json(msg="Failed to download %s, %s" % (deb,
                                                                info['msg']))
        # Ensure file is open in binary mode for Python 3
        f = open(package, 'wb')
        # Read BUFSIZE (64k) at a time to save on RAM
        while True:
            data = rsp.read(BUFSIZE)
            data = to_bytes(data, errors='surrogate_or_strict')

            if len(data) < 1:
                break # End of file, break while loop

            f.write(data)
        f.close()
        deb = package
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failure downloading %s, %s" % (deb, e))

    return deb
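Both apt.py variants of download() shown here lean on standard Ansible helpers; a sketch of the imports the surrounding module is assumed to provide:

import os
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes
from ansible.module_utils.pycompat24 import get_exception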
Code example #18
def main():

    module = AnsibleModule(
        argument_spec = dict(
            nsc_host = dict(required=True),
            nsc_protocol = dict(default='https'),
            user = dict(required=True),
            password = dict(required=True),
            action = dict(default='enable', choices=['enable','disable']),
            name = dict(default=socket.gethostname()),
            type = dict(default='server', choices=['service', 'server']),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    rc = 0
    try:
        rc, result = core(module)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))

    if rc != 0:
        module.fail_json(rc=rc, msg=result)
    else:
        result['changed'] = True
        module.exit_json(**result)
Code example #19
File: junos_package.py  Project: ernstp/ansible
def connect(module):
    host = get_param(module, 'host')

    kwargs = {
        'port': get_param(module, 'port') or 830,
        'user': get_param(module, 'username')
    }

    if get_param(module, 'password'):
        kwargs['passwd'] = get_param(module, 'password')

    if get_param(module, 'ssh_keyfile'):
        kwargs['ssh_private_key_file'] = get_param(module, 'ssh_keyfile')

    kwargs['gather_facts'] = False

    try:
        device = Device(host, **kwargs)
        device.open()
        device.timeout = get_param(module, 'timeout') or 10
    except ConnectError:
        exc = get_exception()
        module.fail_json(msg='unable to connect to %s: %s' % (host, str(exc)))

    return device
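connect() assumes the junos-eznc (PyEZ) library; a sketch of the imports the full junos_package module is expected to carry (get_param is the module's own helper and is not reproduced here):

from jnpr.junos import Device
from jnpr.junos.exception import ConnectError
from ansible.module_utils.pycompat24 import get_exception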
Code example #20
def _attach_volume(module, profitbricks, datacenter, volume):
    """
    Attaches a volume.

    This will attach a volume to the server.

    module : AnsibleModule object
    profitbricks: authenticated profitbricks object.

    Returns:
        True if the volume was attached, false otherwise
    """
    server = module.params.get('server')

    # Locate UUID for Server
    if server:
        if not (uuid_match.match(server)):
            server_list = profitbricks.list_servers(datacenter)
            for s in server_list['items']:
                if server == s['properties']['name']:
                    server = s['id']
                    break

        try:
            return profitbricks.attach_volume(datacenter, server, volume)
        except Exception:
            e = get_exception()
            module.fail_json(msg='failed to attach volume: %s' % str(e))
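_attach_volume() relies on a module-level uuid_match regex to tell server UUIDs apart from server names; a plausible definition, offered as an assumption rather than part of this excerpt:

import re

# Assumed module-level pattern matching a standard 8-4-4-4-12 UUID
uuid_match = re.compile(r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.IGNORECASE)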
Code example #21
File: known_hosts.py  Project: 2ndQuadrant/ansible
def sanity_check(module,host,key,sshkeygen):
    '''Check supplied key is sensible

    host and key are parameters provided by the user; If the host
    provided is inconsistent with the key supplied, then this function
    quits, providing an error to the user.
    sshkeygen is the path to ssh-keygen, found earlier with get_bin_path
    '''
    #If no key supplied, we're doing a removal, and have nothing to check here.
    if key is None:
        return
    #Rather than parsing the key ourselves, get ssh-keygen to do it
    #(this is essential for hashed keys, but otherwise useful, as the
    #key question is whether ssh-keygen thinks the key matches the host).

    #The approach is to write the key to a temporary file,
    #and then attempt to look up the specified host in that file.
    try:
        outf=tempfile.NamedTemporaryFile()
        outf.write(key)
        outf.flush()
    except IOError:
        e = get_exception()
        module.fail_json(msg="Failed to write to temporary file %s: %s" % \
                             (outf.name,str(e)))
    rc,stdout,stderr=module.run_command([sshkeygen,'-F',host,
                                         '-f',outf.name],
                                        check_rc=True)
    try:
        outf.close()
    except:
        pass

    if stdout=='': #host not found
        module.fail_json(msg="Host parameter does not match hashed host field in supplied key")
Code example #22
File: ipa_hostgroup.py  Project: 2ndQuadrant/ansible
def main():
    module = AnsibleModule(
        argument_spec=dict(
            cn=dict(type="str", required=True, aliases=["name"]),
            description=dict(type="str", required=False),
            host=dict(type="list", required=False),
            hostgroup=dict(type="list", required=False),
            state=dict(
                type="str", required=False, default="present", choices=["present", "absent", "enabled", "disabled"]
            ),
            ipa_prot=dict(type="str", required=False, default="https", choices=["http", "https"]),
            ipa_host=dict(type="str", required=False, default="ipa.example.com"),
            ipa_port=dict(type="int", required=False, default=443),
            ipa_user=dict(type="str", required=False, default="admin"),
            ipa_pass=dict(type="str", required=True, no_log=True),
            validate_certs=dict(type="bool", required=False, default=True),
        ),
        supports_check_mode=True,
    )

    client = HostGroupIPAClient(
        module=module,
        host=module.params["ipa_host"],
        port=module.params["ipa_port"],
        protocol=module.params["ipa_prot"],
    )

    try:
        client.login(username=module.params["ipa_user"], password=module.params["ipa_pass"])
        changed, hostgroup = ensure(module, client)
        module.exit_json(changed=changed, hostgroup=hostgroup)
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
Code example #23
File: sefcontext.py  Project: 2ndQuadrant/ansible
def semanage_fcontext_delete(module, result, target, ftype, do_reload, sestore=''):
    ''' Delete SELinux file context mapping definition from the policy. '''

    changed = False
    prepared_diff = ''

    try:
        sefcontext = seobject.fcontextRecords(sestore)
        sefcontext.set_reload(do_reload)
        exists = semanage_fcontext_exists(sefcontext, target, ftype)
        if exists:
            # Remove existing entry
            orig_seuser, orig_serole, orig_setype, orig_serange = exists

            if not module.check_mode:
                sefcontext.delete(target, ftype)
            changed = True

            if module._diff:
                prepared_diff += '# Deletion to semanage file context mappings\n'
                prepared_diff += '-%s      %s      %s:%s:%s:%s\n' % (target, ftype, exists[0], exists[1], exists[2], exists[3])

    except Exception:
        e = get_exception()
        module.fail_json(msg="%s: %s\n" % (e.__class__.__name__, to_native(e)))

    if module._diff and prepared_diff:
        result['diff'] = dict(prepared=prepared_diff)

    module.exit_json(changed=changed, **result)
Code example #24
File: cronvar.py  Project: 2ndQuadrant/ansible
    def read(self):
        # Read in the crontab from the system
        self.lines = []
        if self.cron_file:
            # read the cronfile
            try:
                f = open(self.cron_file, 'r')
                self.lines = f.read().splitlines()
                f.close()
            except IOError:
                e = get_exception()
                # cron file does not exist
                return
            except:
                raise CronVarError("Unexpected error:", sys.exc_info()[0])
        else:
            # using safely quoted shell for now, but this really should be two non-shell calls instead.  FIXME
            (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)

            if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
                raise CronVarError("Unable to read crontab")

            lines = out.splitlines()
            count = 0
            for l in lines:
                if count > 2 or (not re.match( r'# DO NOT EDIT THIS FILE - edit the master and reinstall.', l) and
                                 not re.match( r'# \(/tmp/.*installed on.*\)', l) and
                                 not re.match( r'# \(.*version.*\)', l)):
                    self.lines.append(l)
                count += 1
Code example #25
File: aos_blueprint_param.py  Project: ernstp/ansible
def blueprint_param_present(module, aos, blueprint, param, param_value):

    margs = module.params

    # If param_value is not defined, just return the object
    if not param_value:
        module.exit_json(changed=False,
                         blueprint=blueprint.name,
                         name=param.name,
                         value=param.value)

    # Check if current value is the same or not
    elif param.value != param_value:
        if not module.check_mode:
            try:
                param.value = param_value
            except:
                exc = get_exception()
                module.fail_json(msg='unable to write to param %s: %r' %
                                     (margs['name'], exc))

        module.exit_json(changed=True,
                         blueprint=blueprint.name,
                         name=param.name,
                         value=param.value)

    # If value are already the same, nothing needs to be changed
    else:
        module.exit_json(changed=False,
                         blueprint=blueprint.name,
                         name=param.name,
                         value=param.value)
Code example #26
File: junos.py  Project: likewg/DevOps
    def connect(self, params, **kwargs):
        host = params['host']

        kwargs = dict()
        kwargs['port'] = params.get('port') or 830

        kwargs['user'] = params['username']

        if params['password']:
            kwargs['passwd'] = params['password']

        if params['ssh_keyfile']:
            kwargs['ssh_private_key_file'] = params['ssh_keyfile']

        kwargs['gather_facts'] = False

        try:
            self.device = Device(host, **kwargs)
            self.device.open()
        except ConnectError:
            exc = get_exception()
            self.raise_exc('unable to connect to %s: %s' % (host, str(exc)))

        self.config = Config(self.device)
        self._connected = True
Code example #27
File: junos.py  Project: likewg/DevOps
    def cli(self, commands, output='xml'):
        '''Send commands to the device.'''
        try:
            return self.device.cli(commands, format=output, warning=False)
        except (ValueError, RpcError):
            exc = get_exception()
            self.raise_exc('Unable to get cli output: %s' % str(exc))
Code example #28
def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
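The request() wrapper above is self-contained apart from its imports; a hedged sketch of what it needs, assuming the usual Ansible url helpers:

import json
from ansible.module_utils.urls import open_url
from ansible.module_utils.pycompat24 import get_exception
try:
    from urllib2 import HTTPError          # Python 2
except ImportError:
    from urllib.error import HTTPError     # Python 3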
Code example #29
File: junos.py  Project: likewg/DevOps
    def lock_config(self):
        try:
            self.config.lock()
            self._locked = True
        except LockError:
            exc = get_exception()
            raise NetworkError('unable to lock config: %s' % str(exc))
Code example #30
File: junos.py  Project: likewg/DevOps
    def unlock_config(self):
        try:
            self.config.unlock()
            self._locked = False
        except UnlockError:
            exc = get_exception()
            raise NetworkError('unable to unlock config: %s' % str(exc))
Code example #31
def main():

    module = AnsibleModule(
        argument_spec=dict(
            schema=dict(required=True, aliases=['name']),
            usage_roles=dict(default=None, aliases=['usage_role']),
            create_roles=dict(default=None, aliases=['create_role']),
            owner=dict(default=None),
            state=dict(default='present', choices=['absent', 'present']),
            db=dict(default=None),
            cluster=dict(default='localhost'),
            port=dict(default='5433'),
            login_user=dict(default='dbadmin'),
            login_password=dict(default=None),
        ), supports_check_mode = True)

    if not pyodbc_found:
        module.fail_json(msg="The python pyodbc module is required.")

    schema = module.params['schema']
    usage_roles = []
    if module.params['usage_roles']:
        usage_roles = module.params['usage_roles'].split(',')
        usage_roles = filter(None, usage_roles)
    create_roles = []
    if module.params['create_roles']:
        create_roles = module.params['create_roles'].split(',')
        create_roles = filter(None, create_roles)
    owner = module.params['owner']
    state = module.params['state']
    db = ''
    if module.params['db']:
        db = module.params['db']

    changed = False

    try:
        dsn = (
            "Driver=Vertica;"
            "Server={0};"
            "Port={1};"
            "Database={2};"
            "User={3};"
            "Password={4};"
            "ConnectionLoadBalance={5}"
            ).format(module.params['cluster'], module.params['port'], db,
                module.params['login_user'], module.params['login_password'], 'true')
        db_conn = pyodbc.connect(dsn, autocommit=True)
        cursor = db_conn.cursor()
    except Exception:
        e = get_exception()
        module.fail_json(msg="Unable to connect to database: {0}.".format(e))

    try:
        schema_facts = get_schema_facts(cursor)
        if module.check_mode:
            changed = not check(schema_facts, schema, usage_roles, create_roles, owner)
        elif state == 'absent':
            try:
                changed = absent(schema_facts, cursor, schema, usage_roles, create_roles)
            except pyodbc.Error:
                e = get_exception()
                module.fail_json(msg=str(e))
        elif state == 'present':
            try:
                changed = present(schema_facts, cursor, schema, usage_roles, create_roles, owner)
            except pyodbc.Error:
                e = get_exception()
                module.fail_json(msg=str(e))
    except NotSupportedError:
        e = get_exception()
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except CannotDropError:
        e = get_exception()
        module.fail_json(msg=str(e), ansible_facts={'vertica_schemas': schema_facts})
    except SystemExit:
        # avoid catching this on python 2.4
        raise
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, schema=schema, ansible_facts={'vertica_schemas': schema_facts})
Code example #32
File: stat.py  Project: zoobab/ansible-modules-core
def main():
    module = AnsibleModule(argument_spec=dict(
        path=dict(required=True, type='path'),
        follow=dict(default='no', type='bool'),
        get_md5=dict(default='yes', type='bool'),
        get_checksum=dict(default='yes', type='bool'),
        get_mime=dict(default=True,
                      type='bool',
                      aliases=['mime', 'mime_type', 'mime-type']),
        get_attributes=dict(default=True,
                            type='bool',
                            aliases=['attributes', 'attr']),
        checksum_algorithm=dict(
            default='sha1',
            type='str',
            choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'],
            aliases=['checksum_algo', 'checksum']),
    ),
                           supports_check_mode=True)

    path = module.params.get('path')
    b_path = to_bytes(path, errors='surrogate_or_strict')
    follow = module.params.get('follow')
    get_mime = module.params.get('get_mime')
    get_attr = module.params.get('get_attributes')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    # main stat data
    try:
        if follow:
            st = os.stat(b_path)
        else:
            st = os.lstat(b_path)
    except OSError:
        e = get_exception()
        if e.errno == errno.ENOENT:
            output = {'exists': False}
            module.exit_json(changed=False, stat=output)

        module.fail_json(msg=e.strerror)

    # process base results
    output = format_output(module, path, st)

    # resolved permissions
    for perm in [('readable', os.R_OK), ('writeable', os.W_OK),
                 ('executable', os.X_OK)]:
        output[perm[0]] = os.access(path, perm[1])

    # symlink info
    if output.get('islnk'):
        output['lnk_source'] = os.path.realpath(path)

    try:  # user data
        pw = pwd.getpwuid(st.st_uid)
        output['pw_name'] = pw.pw_name
    except:
        pass

    try:  # group data
        grp_info = grp.getgrgid(st.st_gid)
        output['gr_name'] = grp_info.gr_name
    except:
        pass

    # checksums
    if output.get('isreg') and output.get('readable'):
        if get_md5:
            # Will fail on FIPS-140 compliant systems
            try:
                output['md5'] = module.md5(path)
            except ValueError:
                output['md5'] = None

        if get_checksum:
            output['checksum'] = module.digest_from_file(
                path, checksum_algorithm)

    # try to get mime data if requested
    if get_mime:
        output['mimetype'] = output['charset'] = 'unknown'
        mimecmd = module.get_bin_path('file')
        if mimecmd:
            mimecmd = [mimecmd, '-i', path]
            try:
                rc, out, err = module.run_command(mimecmd)
                if rc == 0:
                    mimetype, charset = out.split(':')[1].split(';')
                    output['mimetype'] = mimetype.strip()
                    output['charset'] = charset.split('=')[1].strip()
            except:
                pass

    # try to get attr data
    if get_attr:
        output['version'] = None
        output['attributes'] = []
        output['attr_flags'] = ''
        out = module.get_file_attributes(path)
        for x in ('version', 'attributes', 'attr_flags'):
            if x in out:
                output[x] = out[x]

    module.exit_json(changed=False, stat=output)
Code example #33
def main():
    module = AnsibleModule(
        argument_spec=dict(
            domain=dict(
                default="NSGlobalDomain",
                required=False,
            ),
            host=dict(
                default=None,
                required=False,
            ),
            key=dict(
                default=None,
            ),
            type=dict(
                default="string",
                required=False,
                choices=[
                    "array",
                    "bool",
                    "boolean",
                    "date",
                    "float",
                    "int",
                    "integer",
                    "string",
                ],
            ),
            array_add=dict(
                default=False,
                required=False,
                type='bool',
            ),
            value=dict(
                default=None,
                required=False,
                type='raw'
            ),
            state=dict(
                default="present",
                required=False,
                choices=[
                    "absent", "present"
                ],
            ),
            path=dict(
                default="/usr/bin:/usr/local/bin",
                required=False,
            )
        ),
        supports_check_mode=True,
    )

    domain = module.params['domain']
    host = module.params['host']
    key = module.params['key']
    type = module.params['type']
    array_add = module.params['array_add']
    value = module.params['value']
    state = module.params['state']
    path = module.params['path']

    try:
        defaults = OSXDefaults(module=module, domain=domain, host=host, key=key, type=type,
                               array_add=array_add, value=value, state=state, path=path)
        changed = defaults.run()
        module.exit_json(changed=changed)
    except OSXDefaultsException:
        e = get_exception()
        module.fail_json(msg=str(e))
Code example #34
File: openssl_csr.py  Project: sramakr/ansible-1
def main():
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str'),
            digest=dict(default='sha256', type='str'),
            privatekey_path=dict(required=True, type='path'),
            version=dict(default='3', type='int'),
            force=dict(default=False, type='bool'),
            subjectAltName=dict(aliases=['subjectAltName'], type='str'),
            path=dict(required=True, type='path'),
            countryName=dict(aliases=['C'], type='str'),
            stateOrProvinceName=dict(aliases=['ST'], type='str'),
            localityName=dict(aliases=['L'], type='str'),
            organizationName=dict(aliases=['O'], type='str'),
            organizationalUnitName=dict(aliases=['OU'], type='str'),
            commonName=dict(aliases=['CN'], type='str'),
            emailAddress=dict(aliases=['E'], type='str'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
        required_one_of=[['commonName', 'subjectAltName']],
    )

    path = module.params['path']
    base_dir = os.path.dirname(module.params['path'])

    if not os.path.isdir(base_dir):
        module.fail_json(name=path,
                         msg='The directory %s does not exist' % base_dir)

    csr = CertificateSigningRequest(module)

    if module.params['state'] == 'present':

        if module.check_mode:
            result = csr.dump()
            result['changed'] = module.params['force'] or not os.path.exists(
                path)
            module.exit_json(**result)

        try:
            csr.generate(module)
        except CertificateSigningRequestError:
            e = get_exception()
            module.fail_json(msg=str(e))

    else:

        if module.check_mode:
            result = csr.dump()
            result['changed'] = os.path.exists(path)
            module.exit_json(**result)

        try:
            csr.remove()
        except CertificateSigningRequestError:
            e = get_exception()
            module.fail_json(msg=str(e))

    result = csr.dump()

    module.exit_json(**result)
Code example #35
File: cloudflare_dns.py  Project: yuzhida/ansible
    def _cf_simple_api_call(self, api_call, method='GET', payload=None):
        headers = {
            'X-Auth-Email': self.account_email,
            'X-Auth-Key': self.account_api_token,
            'Content-Type': 'application/json'
        }
        data = None
        if payload:
            try:
                data = json.dumps(payload)
            except Exception:
                e = get_exception()
                self.module.fail_json(
                    msg="Failed to encode payload as JSON: %s " % str(e))

        resp, info = fetch_url(self.module,
                               self.cf_api_endpoint + api_call,
                               headers=headers,
                               data=data,
                               method=method,
                               timeout=self.timeout)

        if info['status'] not in [200, 304, 400, 401, 403, 429, 405, 415]:
            self.module.fail_json(
                msg="Failed API call {0}; got unexpected HTTP code {1}".format(
                    api_call, info['status']))

        error_msg = ''
        if info['status'] == 401:
            # Unauthorized
            error_msg = "API user does not have permission; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)
        elif info['status'] == 403:
            # Forbidden
            error_msg = "API request not authenticated; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)
        elif info['status'] == 429:
            # Too many requests
            error_msg = "API client is rate limited; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)
        elif info['status'] == 405:
            # Method not allowed
            error_msg = "API incorrect HTTP method provided; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)
        elif info['status'] == 415:
            # Unsupported Media Type
            error_msg = "API request is not valid JSON; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)
        elif info['status'] == 400:
            # Bad Request
            error_msg = "API bad request; Status: {0}; Method: {1}: Call: {2}".format(
                info['status'], method, api_call)

        result = None
        try:
            content = resp.read()
        except AttributeError:
            if info['body']:
                content = info['body']
            else:
                error_msg += "; The API response was empty"

        if content:
            try:
                result = json.loads(content)
            except ValueError:  # json.JSONDecodeError subclasses ValueError on Python 3
                error_msg += "; Failed to parse API response: {0}".format(
                    content)

        # received an error status but no data with details on what failed
        if (info['status'] not in [200, 304]) and (result is None):
            self.module.fail_json(msg=error_msg)

        if not result['success']:
            error_msg += "; Error details: "
            for error in result['errors']:
                error_msg += "code: {0}, error: {1}; ".format(
                    error['code'], error['message'])
                if 'error_chain' in error:
                    for chain_error in error['error_chain']:
                        error_msg += "code: {0}, error: {1}; ".format(
                            chain_error['code'], chain_error['message'])
            self.module.fail_json(msg=error_msg)

        return result, info['status']
Code example #36
def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot='/'):

    pkgs = []
    res = {}
    res['results'] = []
    res['msg'] = ''
    res['rc'] = 0
    res['changed'] = False
    tempdir = tempfile.mkdtemp()

    for spec in items:
        pkg = None

        # check if pkgspec is installed (if possible for idempotence)
        # localpkg
        if spec.endswith('.rpm') and '://' not in spec:
            # get the pkg name-v-r.arch
            if not os.path.exists(spec):
                res['msg'] += "No RPM file matching '%s' found on system" % spec
                res['results'].append("No RPM file matching '%s' found on system" % spec)
                res['rc'] = 127 # Ensure the task fails in with-loop
                module.fail_json(**res)

            nvra = local_nvra(module, spec)

            # look for them in the rpmdb
            if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
                # if they are there, skip it
                continue
            pkg = spec

        # URL
        elif '://' in spec:
            # download package so that we can check if it's already installed
            package = fetch_rpm_from_url(spec, module=module)
            nvra = local_nvra(module, package)
            if is_installed(module, repoq, nvra, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
                # if it's there, skip it
                continue
            pkg = package

        #groups :(
        elif spec.startswith('@'):
            # complete wild ass guess b/c it's a group
            pkg = spec

        # range requires or file-requires or pkgname :(
        else:
            # most common case is the pkg is already installed and done
            # short circuit all the bs - and search for it as a pkg in is_installed
            # if you find it then we're done
            if not set(['*','?']).intersection(set(spec)):
                installed_pkgs = is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True, installroot=installroot)
                if installed_pkgs:
                    res['results'].append('%s providing %s is already installed' % (installed_pkgs[0], spec))
                    continue

            # look up what pkgs provide this
            pkglist = what_provides(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot)
            if not pkglist:
                res['msg'] += "No package matching '%s' found available, installed or updated" % spec
                res['results'].append("No package matching '%s' found available, installed or updated" % spec)
                res['rc'] = 126 # Ensure the task fails in with-loop
                module.fail_json(**res)

            # if any of the packages are involved in a transaction, fail now
            # so that we don't hang on the yum operation later
            conflicts = transaction_exists(pkglist)
            if len(conflicts) > 0:
                res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
                res['rc'] = 125 # Ensure the task fails in with-loop
                module.fail_json(**res)

            # if any of them are installed
            # then nothing to do

            found = False
            for this in pkglist:
                if is_installed(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos, is_pkg=True, installroot=installroot):
                    found = True
                    res['results'].append('%s providing %s is already installed' % (this, spec))
                    break

            # if the version of the pkg you have installed is not in ANY repo, but there are
            # other versions in the repos (both higher and lower) then the previous checks won't work.
            # so we check one more time. This really only works for pkgname - not for file provides or virt provides
            # but virt provides should be all caught in what_provides on its own.
            # highly irritating
            if not found:
                if is_installed(module, repoq, spec, conf_file, en_repos=en_repos, dis_repos=dis_repos, installroot=installroot):
                    found = True
                    res['results'].append('package providing %s is already installed' % (spec))

            if found:
                continue

            # if not - then pass in the spec as what to install
            # we could get here if nothing provides it but that's not
            # the error we're catching here
            pkg = spec

        pkgs.append(pkg)

    if pkgs:
        cmd = yum_basecmd + ['install'] + pkgs

        if module.check_mode:
            # Remove rpms downloaded for EL5 via url
            try:
                shutil.rmtree(tempdir)
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e))

            module.exit_json(changed=True, results=res['results'], changes=dict(installed=pkgs))

        changed = True

        # force the C locale so yum's output is predictable English for the checks below
        lang_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        rc, out, err = module.run_command(cmd, environ_update=lang_env)

        if (rc == 1):
            for spec in items:
                # Fail on invalid urls:
                if ('://' in spec and ('No package %s available.' % spec in out or 'Cannot open: %s. Skipping.' % spec in err)):
                    module.fail_json(msg='Package at %s could not be installed' % spec, rc=1, changed=False)
        if (rc != 0 and 'Nothing to do' in err) or 'Nothing to do' in out:
            # avoid failing in the 'Nothing to do' case
            # this may happen with a URL spec.
            # for an already installed group,
            # we get rc = 0 and 'Nothing to do' in out, not in err.
            rc = 0
            err = ''
            out = '%s: Nothing to do' % spec
            changed = False

        res['rc'] = rc
        res['results'].append(out)
        res['msg'] += err

        # FIXME - if we did an install - go and check the rpmdb to see if it actually installed
        # look for each pkg in rpmdb
        # look for each pkg via obsoletes

        # Record change
        res['changed'] = changed

    # Remove rpms downloaded for EL5 via url
    try:
        shutil.rmtree(tempdir)
    except Exception:
        e = get_exception()
        module.fail_json(msg="Failure deleting temp directory %s, %s" % (tempdir, e))

    return res
Code example #37
0
File: jenkins_plugin.py Project: sw092180/ansible-1
def main():
    # Module arguments
    argument_spec = url_argument_spec()
    argument_spec.update(
        group=dict(default='jenkins'),
        jenkins_home=dict(default='/var/lib/jenkins'),
        mode=dict(default='0644', type='raw'),
        name=dict(required=True),
        owner=dict(default='jenkins'),
        params=dict(type='dict'),
        state=dict(
            choices=[
                'present',
                'absent',
                'pinned',
                'unpinned',
                'enabled',
                'disabled',
                'latest'],
            default='present'),
        timeout=dict(default=30, type="int"),
        updates_expiration=dict(default=86400, type="int"),
        updates_url=dict(default='https://updates.jenkins-ci.org'),
        url=dict(default='http://localhost:8080'),
        url_password=dict(no_log=True),
        version=dict(),
        with_dependencies=dict(default=True, type='bool'),
    )
    # Module settings
    module = AnsibleModule(
        argument_spec=argument_spec,
        add_file_common_args=True,
        supports_check_mode=True,
    )

    # Params was removed
    # https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
    if module.params['params']:
        module.fail_json(msg="The params option to jenkins_plugin was removed in Ansible 2.5"
                         "since it circumvents Ansible's option handling")

    # Force basic authentication
    module.params['force_basic_auth'] = True

    # Convert timeout to float
    try:
        module.params['timeout'] = float(module.params['timeout'])
    except ValueError:
        e = get_exception()
        module.fail_json(
            msg='Cannot convert %s to float.' % module.params['timeout'],
            details=to_native(e))

    # Set version to latest if state is latest
    if module.params['state'] == 'latest':
        module.params['state'] = 'present'
        module.params['version'] = 'latest'

    # Create some shortcuts
    name = module.params['name']
    state = module.params['state']

    # Initial change state of the task
    changed = False

    # Instantiate the JenkinsPlugin object
    jp = JenkinsPlugin(module)

    # Perform action depending on the requested state
    if state == 'present':
        changed = jp.install()
    elif state == 'absent':
        changed = jp.uninstall()
    elif state == 'pinned':
        changed = jp.pin()
    elif state == 'unpinned':
        changed = jp.unpin()
    elif state == 'enabled':
        changed = jp.enable()
    elif state == 'disabled':
        changed = jp.disable()

    # Print status of the change
    module.exit_json(changed=changed, plugin=name, state=state)
Code example #38
0
File: jenkins_plugin.py Project: sw092180/ansible-1
    def _download_updates(self):
        updates_filename = 'jenkins-plugin-cache.json'
        updates_dir = os.path.expanduser('~/.ansible/tmp')
        updates_file = "%s/%s" % (updates_dir, updates_filename)
        download_updates = True

        # Check if we need to download new updates file
        if os.path.isfile(updates_file):
            # Get timestamp when the file was changed last time
            ts_file = os.stat(updates_file).st_mtime
            ts_now = time.time()

            if ts_now - ts_file < self.params['updates_expiration']:
                download_updates = False

        updates_file_orig = updates_file

        # Download the updates file if needed
        if download_updates:
            url = "%s/update-center.json" % self.params['updates_url']

            # Get the data
            r = self._get_url_data(
                url,
                msg_status="Remote updates not found.",
                msg_exception="Updates download failed.")

            # Write the updates file
            update_fd, updates_file = tempfile.mkstemp()
            os.write(update_fd, r.read())

            try:
                os.close(update_fd)
            except IOError:
                e = get_exception()
                self.module.fail_json(
                    msg="Cannot close the tmp updates file %s." % updates_file,
                    details=to_native(e))

        # Open the updates file
        try:
            f = open(updates_file)
        except IOError:
            e = get_exception()
            self.module.fail_json(
                msg="Cannot open temporal updates file.",
                details=to_native(e))

        i = 0
        for line in f:
            # Read only the second line
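            # (the Jenkins update-center.json is typically wrapped in a JSONP
            #  callback, so the JSON payload sits on the second line by itself)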
            if i == 1:
                try:
                    data = json.loads(line)
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot load JSON data from the tmp updates file.",
                        details=to_native(e))

                break

            i += 1

        # Move the updates file to the right place if we could read it
        if download_updates:
            # Make sure the destination directory exists
            if not os.path.isdir(updates_dir):
                try:
                    os.makedirs(updates_dir, int('0700', 8))
                except OSError:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot create temporal directory.",
                        details=e.message)

            self.module.atomic_move(updates_file, updates_file_orig)

        # Check if we have the plugin data available
        if 'plugins' not in data or self.params['name'] not in data['plugins']:
            self.module.fail_json(
                msg="Cannot find plugin data in the updates file.")

        return data['plugins'][self.params['name']]
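
# A minimal standalone sketch (not from the module above) of the mtime-based
# cache check that _download_updates performs with updates_expiration; the
# helper name is an assumption.
import os
import time

def cache_is_fresh(path, max_age_seconds):
    """Return True if `path` exists and was modified within the last `max_age_seconds`."""
    if not os.path.isfile(path):
        return False
    return (time.time() - os.stat(path).st_mtime) < max_age_seconds

# Example: treat a cached update-center file as stale after one day (86400 s)
print(cache_is_fresh(os.path.expanduser('~/.ansible/tmp/jenkins-plugin-cache.json'), 86400))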
Code example #39
0
File: jenkins_plugin.py Project: sw092180/ansible-1
    def install(self):
        changed = False
        plugin_file = (
            '%s/plugins/%s.jpi' % (
                self.params['jenkins_home'],
                self.params['name']))

        if not self.is_installed and self.params['version'] is None:
            if not self.module.check_mode:
                # Install the plugin (with dependencies)
                install_script = (
                    'd = Jenkins.instance.updateCenter.getPlugin("%s")'
                    '.deploy(); d.get();' % self.params['name'])

                if self.params['with_dependencies']:
                    install_script = (
                        'Jenkins.instance.updateCenter.getPlugin("%s")'
                        '.getNeededDependencies().each{it.deploy()}; %s' % (
                            self.params['name'], install_script))

                script_data = {
                    'script': install_script
                }
                script_data.update(self.crumb)  # include the Jenkins CSRF protection crumb
                data = urlencode(script_data)

                # Send the installation request
                r = self._get_url_data(
                    "%s/scriptText" % self.url,
                    msg_status="Cannot install plugin.",
                    msg_exception="Plugin installation has failed.",
                    data=data)

                hpi_file = '%s/plugins/%s.hpi' % (
                    self.params['jenkins_home'],
                    self.params['name'])

                if os.path.isfile(hpi_file):
                    os.remove(hpi_file)

            changed = True
        else:
            # Check if the plugin directory exists
            if not os.path.isdir(self.params['jenkins_home']):
                self.module.fail_json(
                    msg="Jenkins home directory doesn't exist.")

            md5sum_old = None
            if os.path.isfile(plugin_file):
                # Make the checksum of the currently installed plugin
                md5sum_old = hashlib.md5(
                    open(plugin_file, 'rb').read()).hexdigest()

            if self.params['version'] in [None, 'latest']:
                # Take latest version
                plugin_url = (
                    "%s/latest/%s.hpi" % (
                        self.params['updates_url'],
                        self.params['name']))
            else:
                # Take specific version
                plugin_url = (
                    "{0}/download/plugins/"
                    "{1}/{2}/{1}.hpi".format(
                        self.params['updates_url'],
                        self.params['name'],
                        self.params['version']))

            if (
                    self.params['updates_expiration'] == 0 or
                    self.params['version'] not in [None, 'latest'] or
                    md5sum_old is None):

                # Download the plugin file directly
                r = self._download_plugin(plugin_url)

                # Write downloaded plugin into file if checksums don't match
                if md5sum_old is None:
                    # No previously installed plugin
                    if not self.module.check_mode:
                        self._write_file(plugin_file, r)

                    changed = True
                else:
                    # Get data for the MD5
                    data = r.read()

                    # Make new checksum
                    md5sum_new = hashlib.md5(data).hexdigest()

                    # If the checksum is different from the currently installed
                    # plugin, store the new plugin
                    if md5sum_old != md5sum_new:
                        if not self.module.check_mode:
                            self._write_file(plugin_file, data)

                        changed = True
            else:
                # Check for update from the updates JSON file
                plugin_data = self._download_updates()

                try:
                    sha1_old = hashlib.sha1(open(plugin_file, 'rb').read())
                except Exception:
                    e = get_exception()
                    self.module.fail_json(
                        msg="Cannot calculate SHA1 of the old plugin.",
                        details=to_native(e))

                sha1sum_old = base64.b64encode(sha1_old.digest())

                # If the latest version changed, download it
                if sha1sum_old != plugin_data['sha1']:
                    if not self.module.check_mode:
                        r = self._download_plugin(plugin_url)
                        self._write_file(plugin_file, r)

                    changed = True

        # Change file attributes if needed
        if os.path.isfile(plugin_file):
            params = {
                'dest': plugin_file
            }
            params.update(self.params)
            file_args = self.module.load_file_common_arguments(params)

            if not self.module.check_mode:
                # Not sure how to run this in the check mode
                changed = self.module.set_fs_attributes_if_different(
                    file_args, changed)
            else:
                # See the comment above
                changed = True

        return changed
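
# A minimal standalone sketch (not from the module above) of the MD5 comparison
# that install() uses to decide whether downloaded plugin data differs from the
# plugin already on disk; the helper name and sample path are assumptions.
import hashlib

def plugin_differs(plugin_file, downloaded_bytes):
    """Return True when the plugin on disk is missing or its MD5 differs."""
    try:
        with open(plugin_file, 'rb') as f:
            old_digest = hashlib.md5(f.read()).hexdigest()
    except IOError:
        return True  # no previously installed plugin
    return hashlib.md5(downloaded_bytes).hexdigest() != old_digest

print(plugin_differs('/var/lib/jenkins/plugins/git.jpi', b'new plugin bytes'))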
Code example #40
0
def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(
        dict(state=dict(required=True, choices=['present', 'absent']),
             ssid=dict(required=True, type='str'),
             controller_addresses=dict(type='list'),
             array_wwn=dict(required=False, type='str'),
             array_password=dict(required=False, type='str', no_log=True),
             array_status_timeout_sec=dict(default=60, type='int'),
             enable_trace=dict(default=False, type='bool'),
             meta_tags=dict(type='list')))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[['controller_addresses', 'array_wwn']],
        required_if=[('state', 'present', ['controller_addresses'])])

    p = module.params

    state = p['state']
    ssid = p['ssid']
    controller_addresses = p['controller_addresses']
    array_wwn = p['array_wwn']
    array_password = p['array_password']
    array_status_timeout_sec = p['array_status_timeout_sec']
    validate_certs = p['validate_certs']
    meta_tags = p['meta_tags']
    enable_trace = p['enable_trace']

    api_usr = p['api_username']
    api_pwd = p['api_password']
    api_url = p['api_url']

    changed = False
    array_exists = False

    try:
        (rc, resp) = request(api_url + "/storage-systems/%s" % ssid,
                             headers=dict(Accept="application/json"),
                             url_username=api_usr,
                             url_password=api_pwd,
                             validate_certs=validate_certs,
                             ignore_errors=True)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Error accessing storage-system with id [%s]. Error [%s]" %
            (ssid, str(err)))

    array_exists = True
    array_detail = resp

    if rc == 200:
        if state == 'absent':
            changed = True
            array_exists = False
        elif state == 'present':
            current_addresses = frozenset(i for i in (array_detail['ip1'],
                                                      array_detail['ip2'])
                                          if i)
            if set(controller_addresses) != current_addresses:
                changed = True
            if array_detail['wwn'] != array_wwn and array_wwn is not None:
                module.fail_json(
                    msg=
                    'It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s'
                    % (ssid, array_detail['wwn']))
    elif rc == 404:
        if state == 'present':
            changed = True
            array_exists = False
        else:
            changed = False
            module.exit_json(changed=changed,
                             msg="Storage system was not present.")

    if changed and not module.check_mode:
        if state == 'present':
            if not array_exists:
                # add the array
                array_add_req = dict(id=ssid,
                                     controllerAddresses=controller_addresses,
                                     metaTags=meta_tags,
                                     enableTrace=enable_trace)

                if array_wwn:
                    array_add_req['wwn'] = array_wwn

                if array_password:
                    array_add_req['password'] = array_password

                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                request_data = json.dumps(array_add_req)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr,
                                         api_pwd, validate_certs, request_data,
                                         array_status_timeout_sec)
                except Exception:
                    err = get_exception()
                    module.fail_json(
                        msg=
                        "Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]."
                        % (ssid, request_data, str(err)))

            else:  # array exists, modify...
                post_headers = dict(Accept="application/json")
                post_headers['Content-Type'] = 'application/json'
                post_body = dict(controllerAddresses=controller_addresses,
                                 removeAllTags=True,
                                 enableTrace=enable_trace,
                                 metaTags=meta_tags)

                try:
                    (rc, resp) = do_post(ssid, api_url, post_headers, api_usr,
                                         api_pwd, validate_certs, post_body,
                                         array_status_timeout_sec)
                except Exception:
                    err = get_exception()
                    module.fail_json(
                        msg=
                        "Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]."
                        % (ssid, post_body, str(err)))

        elif state == 'absent':
            # delete the array
            try:
                (rc, resp) = request(api_url + "/storage-systems/%s" % ssid,
                                     method='DELETE',
                                     url_username=api_usr,
                                     url_password=api_pwd,
                                     validate_certs=validate_certs)
            except Exception:
                err = get_exception()
                module.fail_json(
                    msg="Failed to remove storage array. Id[%s]. Error[%s]." %
                    (ssid, str(err)))

            if rc == 422:
                module.exit_json(changed=changed,
                                 msg="Storage system was not presnt.")
            if rc == 204:
                module.exit_json(changed=changed,
                                 msg="Storage system removed.")

    module.exit_json(changed=changed, **resp)
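
# A tiny standalone illustration (not from the module above) of the
# controller-address comparison used above: ordering does not matter and
# empty entries are ignored.
current_addresses = frozenset(i for i in ('10.0.0.2', '10.0.0.1', '') if i)
desired_addresses = ['10.0.0.1', '10.0.0.2']
print(set(desired_addresses) != current_addresses)  # False -> addresses already match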
Code example #41
0
def install_deb(m, debs, cache, force, install_recommends,
                allow_unauthenticated, dpkg_options):
    changed = False
    deps_to_install = []
    pkgs_to_install = []
    for deb_file in debs.split(','):
        try:
            pkg = apt.debfile.DebPackage(deb_file)
            pkg_name = get_field_of_deb(m, deb_file, "Package")
            pkg_version = get_field_of_deb(m, deb_file, "Version")
            if len(apt_pkg.get_architectures()) > 1:
                pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
                pkg_key = "%s:%s" % (pkg_name, pkg_arch)
            else:
                pkg_key = pkg_name
            try:
                installed_pkg = apt.Cache()[pkg_key]
                installed_version = installed_pkg.installed.version
                if package_version_compare(pkg_version,
                                           installed_version) == 0:
                    # Does not need to down-/upgrade, move on to next package
                    continue
            except Exception:
                # Must not be installed, continue with installation
                pass
            # Check if package is installable
            if not pkg.check() and not force:
                m.fail_json(msg=pkg._failure_string)

            # add any missing deps to the list of deps we need
            # to install so they're all done in one shot
            deps_to_install.extend(pkg.missing_deps)

        except Exception:
            e = get_exception()
            m.fail_json(msg="Unable to install package: %s" % str(e))

        # and add this deb to the list of packages to install
        pkgs_to_install.append(deb_file)

    # install the deps through apt
    retvals = {}
    if len(deps_to_install) > 0:
        (success,
         retvals) = install(m=m,
                            pkgspec=deps_to_install,
                            cache=cache,
                            install_recommends=install_recommends,
                            dpkg_options=expand_dpkg_options(dpkg_options))
        if not success:
            m.fail_json(**retvals)
        changed = retvals.get('changed', False)

    if len(pkgs_to_install) > 0:
        options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
        if m.check_mode:
            options += " --simulate"
        if force:
            options += " --force-all"

        cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
        rc, out, err = m.run_command(cmd)
        if "stdout" in retvals:
            stdout = retvals["stdout"] + out
        else:
            stdout = out
        if "diff" in retvals:
            diff = retvals["diff"]
            if 'prepared' in diff:
                diff['prepared'] += '\n\n' + out
        else:
            diff = parse_diff(out)
        if "stderr" in retvals:
            stderr = retvals["stderr"] + err
        else:
            stderr = err

        if rc == 0:
            m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
        else:
            m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
    else:
        m.exit_json(changed=changed,
                    stdout=retvals.get('stdout', ''),
                    stderr=retvals.get('stderr', ''),
                    diff=retvals.get('diff', ''))
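
# A quick standalone illustration (not from the module above) of how the
# comma-separated dpkg_options string becomes dpkg command-line flags.
dpkg_options = "force-confdef,force-confold"
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
print(options)  # --force-confdef --force-confold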
Code example #42
0
def main():
    module = AnsibleModule(argument_spec=dict(
        command=dict(required=True),
        chdir=dict(),
        creates=dict(),
        removes=dict(),
        responses=dict(type='dict', required=True),
        timeout=dict(type='int', default=3600),
        echo=dict(type='bool', default=False),
    ))

    if not HAS_PEXPECT:
        module.fail_json(msg='The pexpect python module is required')

    chdir = module.params['chdir']
    args = module.params['command']
    creates = module.params['creates']
    removes = module.params['removes']
    responses = module.params['responses']
    timeout = module.params['timeout']
    echo = module.params['echo']

    events = dict()
    for key, value in responses.items():
        if isinstance(value, list):
            response = response_closure(module, key, value)
        else:
            response = u'%s\n' % value.rstrip('\n').decode()

        events[key.decode()] = response

    if args.strip() == '':
        module.fail_json(rc=256, msg="no command given")

    if chdir:
        chdir = os.path.abspath(os.path.expanduser(chdir))
        os.chdir(chdir)

    if creates:
        # do not run the command if the line contains creates=filename
        # and the filename already exists.  This allows idempotence
        # of command executions.
        v = os.path.expanduser(creates)
        if os.path.exists(v):
            module.exit_json(cmd=args,
                             stdout="skipped, since %s exists" % v,
                             changed=False,
                             rc=0)

    if removes:
        # do not run the command if the line contains removes=filename
        # and the filename does not exist.  This allows idempotence
        # of command executions.
        v = os.path.expanduser(removes)
        if not os.path.exists(v):
            module.exit_json(cmd=args,
                             stdout="skipped, since %s does not exist" % v,
                             changed=False,
                             rc=0)

    startd = datetime.datetime.now()

    try:
        try:
            # Prefer pexpect.run from pexpect>=4
            out, rc = pexpect.run(args,
                                  timeout=timeout,
                                  withexitstatus=True,
                                  events=events,
                                  cwd=chdir,
                                  echo=echo,
                                  encoding='utf-8')
        except TypeError:
            # Use pexpect.runu in pexpect>=3.3,<4
            out, rc = pexpect.runu(args,
                                   timeout=timeout,
                                   withexitstatus=True,
                                   events=events,
                                   cwd=chdir,
                                   echo=echo)
    except (TypeError, AttributeError):
        e = get_exception()
        # This should catch all insufficient versions of pexpect
        # We deem them insufficient for their lack of ability to specify
        # to not echo responses via the run/runu functions, which would
        # potentially leak sensitive information
        module.fail_json(msg='Insufficient version of pexpect installed '
                         '(%s), this module requires pexpect>=3.3. '
                         'Error was %s' % (pexpect.__version__, e))
    except pexpect.ExceptionPexpect:
        e = get_exception()
        module.fail_json(msg='%s' % e)

    endd = datetime.datetime.now()
    delta = endd - startd

    if out is None:
        out = ''

    ret = dict(
        cmd=args,
        stdout=out.rstrip('\r\n'),
        rc=rc,
        start=str(startd),
        end=str(endd),
        delta=str(delta),
        changed=True,
    )

    if rc is not None:
        module.exit_json(**ret)
    else:
        ret['msg'] = 'command exceeded timeout'
        module.fail_json(**ret)
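
# A minimal standalone use (not from the module above) of the same
# pexpect.run() call shape, assuming pexpect>=4 and python3 are available:
# the events mapping pairs an expected prompt pattern with the response to send.
import pexpect

out, rc = pexpect.run(
    "python3 -c \"print(input('Continue? '))\"",
    events={"Continue\\? ": "yes\n"},
    withexitstatus=True,
    timeout=30,
    echo=False,
    encoding='utf-8')
print(rc, out)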
Code example #43
0
File: jira.py Project: unndevops/ansible
def main():

    global module
    module = AnsibleModule(
        argument_spec=dict(
            uri=dict(required=True),
            operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition', 'link'],
                           aliases=['command'], required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            project=dict(),
            summary=dict(),
            description=dict(),
            issuetype=dict(),
            issue=dict(aliases=['ticket']),
            comment=dict(),
            status=dict(),
            assignee=dict(),
            fields=dict(default={}, type='dict'),
            linktype=dict(),
            inwardissue=dict(),
            outwardissue=dict(),
            timeout=dict(type='float', default=10),
        ),
        supports_check_mode=False
    )

    op = module.params['operation']

    # Check we have the necessary per-operation parameters
    missing = []
    for parm in OP_REQUIRED[op]:
        if not module.params[parm]:
            missing.append(parm)
    if missing:
        module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))

    # Handle rest of parameters
    uri = module.params['uri']
    user = module.params['username']
    passwd = module.params['password']
    if module.params['assignee']:
        module.params['fields']['assignee'] = { 'name': module.params['assignee'] }

    if not uri.endswith('/'):
        uri = uri+'/'
    restbase = uri + 'rest/api/2'

    # Dispatch
    try:

        # Look up the function corresponding to this operation. This is
        # safe because the argument spec restricts 'operation' to known choices.
        thismod = sys.modules[__name__]
        method = getattr(thismod, op)

        ret = method(restbase, user, passwd, module.params)

    except Exception:
        e = get_exception()
        return module.fail_json(msg=str(e))


    module.exit_json(changed=True, meta=ret)
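
# A minimal standalone sketch (not from the module above) of the getattr-based
# dispatch used here: each operation name maps to a module-level function of
# the same name. The handler below is hypothetical.
import sys

def fetch(restbase, user, passwd, params):
    # hypothetical handler; the real module defines one function per operation
    return {'issue': params.get('issue')}

def dispatch(op, restbase, user, passwd, params):
    handler = getattr(sys.modules[__name__], op)
    return handler(restbase, user, passwd, params)

print(dispatch('fetch', 'https://jira.example.com/rest/api/2', 'user', 'pw', {'issue': 'TEST-1'}))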
Code example #44
0
    def expand_volume(self):
        is_thin = self.volume_detail['thinProvisioned']
        if is_thin:
            # TODO: support manual repo expansion as well
            self.debug('expanding thin volume')
            thin_volume_expand_req = dict(newVirtualSize=self.size,
                                          sizeUnit=self.size_unit)
            try:
                (rc,
                 resp) = request(self.api_url +
                                 "/storage-systems/%s/thin-volumes/%s/expand" %
                                 (self.ssid, self.volume_detail['id']),
                                 data=json.dumps(thin_volume_expand_req),
                                 headers=self._post_headers,
                                 method='POST',
                                 url_username=self.api_usr,
                                 url_password=self.api_pwd,
                                 validate_certs=self.validate_certs,
                                 timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg=
                    "Failed to expand thin volume.  Volume [%s].  Array Id [%s]. Error[%s]."
                    % (self.name, self.ssid, str(err)))

                # TODO: check return code
        else:
            self.debug('expanding volume')
            volume_expand_req = dict(expansionSize=self.size,
                                     sizeUnit=self.size_unit)
            try:
                (rc, resp) = request(self.api_url +
                                     "/storage-systems/%s/volumes/%s/expand" %
                                     (self.ssid, self.volume_detail['id']),
                                     data=json.dumps(volume_expand_req),
                                     headers=self._post_headers,
                                     method='POST',
                                     url_username=self.api_usr,
                                     url_password=self.api_pwd,
                                     validate_certs=self.validate_certs,
                                     timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg=
                    "Failed to expand volume.  Volume [%s].  Array Id [%s]. Error[%s]."
                    % (self.name, self.ssid, str(err)))

            self.debug('polling for completion...')

            while True:
                try:
                    (rc,
                     resp) = request(self.api_url +
                                     "/storage-systems/%s/volumes/%s/expand" %
                                     (self.ssid, self.volume_detail['id']),
                                     method='GET',
                                     url_username=self.api_usr,
                                     url_password=self.api_pwd,
                                     validate_certs=self.validate_certs)
                except Exception:
                    err = get_exception()
                    self.module.fail_json(
                        msg=
                        "Failed to get volume expansion progress.  Volume [%s].  Array Id [%s]. Error[%s]."
                        % (self.name, self.ssid, str(err)))

                action = resp['action']
                percent_complete = resp['percentComplete']

                self.debug('expand action %s, %s complete...' %
                           (action, percent_complete))

                if action == 'none':
                    self.debug('expand complete')
                    break
                else:
                    time.sleep(5)
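
# A generic standalone version (not from the module above) of the
# progress-polling loop in expand_volume; unlike the loop above it adds an
# overall timeout, and the function name is an assumption.
import time

def wait_until(check, interval=5, timeout=600):
    """Poll check() every `interval` seconds until it returns True or `timeout` expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if check():
            return True
        time.sleep(interval)
    return False

print(wait_until(lambda: True, interval=1, timeout=5))  # True immediately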
Code example #45
0
def main():

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['file', 'directory', 'link', 'hard', 'touch', 'absent'], default=None),
            path=dict(aliases=['dest', 'name'], required=True, type='path'),
            original_basename=dict(required=False),  # Internal use only, for recursive ops
            recurse=dict(default=False, type='bool'),
            force=dict(required=False, default=False, type='bool'),
            diff_peek=dict(default=None),  # Internal use only, for internal checks in the action plugins
            validate=dict(required=False, default=None),  # Internal use only, for template and copy
            src=dict(required=False, default=None, type='path'),
        ),
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    state = params['state']
    force = params['force']
    diff_peek = params['diff_peek']
    src = params['src']
    b_src = to_bytes(src, errors='surrogate_or_strict')
    follow = params['follow']

    # modify source as we later reload and pass, specially relevant when used by other modules.
    path = params['path']
    b_path = to_bytes(path, errors='surrogate_or_strict')

    # short-circuit for diff_peek
    if diff_peek is not None:
        appears_binary = False
        try:
            f = open(b_path, 'rb')
            head = f.read(8192)
            f.close()
            if b("\x00") in head:
                appears_binary = True
        except Exception:
            pass
        module.exit_json(path=path, changed=False, appears_binary=appears_binary)

    prev_state = get_state(b_path)

    # state should default to file, but since that creates many conflicts,
    # default to 'current' when it exists.
    if state is None:
        if prev_state != 'absent':
            state = prev_state
        else:
            state = 'file'

    # src is either the source of a symlink, or an informational value passed through by the
    # template or copy modules; even when this module never uses it directly, it is needed to key off some things
    if src is None:
        if state in ('link', 'hard'):
            if follow and state == 'link':
                # use the current target of the link as the source
                src = to_native(os.path.realpath(b_path), errors='strict')
            else:
                module.fail_json(msg='src and dest are required for creating links')

    # original_basename is used by other modules that depend on file.
    if os.path.isdir(b_path) and state not in ("link", "absent"):
        basename = None
        if params['original_basename']:
            basename = params['original_basename']
        elif src is not None:
            basename = os.path.basename(src)
        if basename:
            params['path'] = path = os.path.join(path, basename)
            b_path = to_bytes(path, errors='surrogate_or_strict')

    # make sure the target path is a directory when we're doing a recursive operation
    recurse = params['recurse']
    if recurse and state != 'directory':
        module.fail_json(path=path, msg="recurse option requires state to be 'directory'")

    file_args = module.load_file_common_arguments(params)

    changed = False
    diff = {'before': {'path': path},
            'after': {'path': path},
            }

    state_change = False
    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state
        state_change = True

    if state == 'absent':
        if state_change:
            if not module.check_mode:
                if prev_state == 'directory':
                    try:
                        shutil.rmtree(b_path, ignore_errors=False)
                    except Exception:
                        e = get_exception()
                        module.fail_json(msg="rmtree failed: %s" % str(e))
                else:
                    try:
                        os.unlink(b_path)
                    except Exception:
                        e = get_exception()
                        module.fail_json(path=path, msg="unlinking failed: %s " % str(e))
            module.exit_json(path=path, changed=True, diff=diff)
        else:
            module.exit_json(path=path, changed=False)

    elif state == 'file':

        if state_change:
            if follow and prev_state == 'link':
                # follow symlink and operate on original
                b_path = os.path.realpath(b_path)
                path = to_native(b_path, errors='strict')
                prev_state = get_state(b_path)
                file_args['path'] = path

        if prev_state not in ('file', 'hard'):
            # file is not absent and any other state is a conflict
            module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args, changed, diff)
        module.exit_json(path=path, changed=changed, diff=diff)

    elif state == 'directory':
        if follow and prev_state == 'link':
            b_path = os.path.realpath(b_path)
            path = to_native(b_path, errors='strict')
            prev_state = get_state(b_path)

        if prev_state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True, diff=diff)
            changed = True
            curpath = ''

            try:
                # Split the path so we can apply filesystem attributes recursively
                # from the root (/) directory for absolute paths or the base path
                # of a relative path.  We can then walk the appropriate directory
                # path to apply attributes.
                for dirname in path.strip('/').split('/'):
                    curpath = '/'.join([curpath, dirname])
                    # Remove leading slash if we're creating a relative path
                    if not os.path.isabs(path):
                        curpath = curpath.lstrip('/')
                    b_curpath = to_bytes(curpath, errors='surrogate_or_strict')
                    if not os.path.exists(b_curpath):
                        try:
                            os.mkdir(b_curpath)
                        except OSError:
                            ex = get_exception()
                            # Possibly something else created the dir since the os.path.exists
                            # check above. As long as it's a dir, we don't need to error out.
                            if not (ex.errno == errno.EEXIST and os.path.isdir(b_curpath)):
                                raise
                        tmp_file_args = file_args.copy()
                        tmp_file_args['path'] = curpath
                        changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff)
            except Exception:
                e = get_exception()
                module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e)))

        # We already know prev_state is not 'absent', therefore it exists in some form.
        elif prev_state != 'directory':
            module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args, changed, diff)

        if recurse:
            changed |= recursive_set_attributes(module, to_bytes(file_args['path'], errors='surrogate_or_strict'), follow, file_args)

        module.exit_json(path=path, changed=changed, diff=diff)

    elif state in ('link', 'hard'):

        if os.path.isdir(b_path) and not os.path.islink(b_path):
            relpath = path
        else:
            b_relpath = os.path.dirname(b_path)
            relpath = to_native(b_relpath, errors='strict')

        absrc = os.path.join(relpath, src)
        b_absrc = to_bytes(absrc, errors='surrogate_or_strict')
        if not os.path.exists(b_absrc) and not force:
            module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc)

        if state == 'hard':
            if not os.path.isabs(b_src):
                module.fail_json(msg="absolute paths are required")
        elif prev_state == 'directory':
            if not force:
                module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path))
            elif len(os.listdir(b_path)) > 0:
                # refuse to replace a directory that has files in it
                module.fail_json(path=path, msg='the directory %s is not empty, refusing to convert it' % path)
        elif prev_state in ('file', 'hard') and not force:
            module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, path))

        if prev_state == 'absent':
            changed = True
        elif prev_state == 'link':
            b_old_src = os.readlink(b_path)
            if b_old_src != b_src:
                changed = True
        elif prev_state == 'hard':
            if not (state == 'hard' and os.stat(b_path).st_ino == os.stat(b_src).st_ino):
                changed = True
                if not force:
                    module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination')
        elif prev_state in ('file', 'directory'):
            changed = True
            if not force:
                module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state)
        else:
            module.fail_json(dest=path, src=src, msg='unexpected position reached')

        if changed and not module.check_mode:
            if prev_state != 'absent':
                # try to replace atomically
                b_tmppath = to_bytes(os.path.sep).join(
                    [os.path.dirname(b_path), to_bytes(".%s.%s.tmp" % (os.getpid(), time.time()))]
                )
                try:
                    if prev_state == 'directory' and (state == 'hard' or state == 'link'):
                        os.rmdir(b_path)
                    if state == 'hard':
                        os.link(b_src, b_tmppath)
                    else:
                        os.symlink(b_src, b_tmppath)
                    os.rename(b_tmppath, b_path)
                except OSError:
                    e = get_exception()
                    if os.path.exists(b_tmppath):
                        os.unlink(b_tmppath)
                    module.fail_json(path=path, msg='Error while replacing: %s' % to_native(e, nonstring='simplerepr'))
            else:
                try:
                    if state == 'hard':
                        os.link(b_src, b_path)
                    else:
                        os.symlink(b_src, b_path)
                except OSError:
                    e = get_exception()
                    module.fail_json(path=path, msg='Error while linking: %s' % to_native(e, nonstring='simplerepr'))

        if module.check_mode and not os.path.exists(b_path):
            module.exit_json(dest=path, src=src, changed=changed, diff=diff)

        changed = module.set_fs_attributes_if_different(file_args, changed, diff)
        module.exit_json(dest=path, src=src, changed=changed, diff=diff)

    elif state == 'touch':
        if not module.check_mode:

            if prev_state == 'absent':
                try:
                    open(b_path, 'wb').close()
                except OSError:
                    e = get_exception()
                    module.fail_json(path=path, msg='Error, could not touch target: %s' % to_native(e, nonstring='simplerepr'))
            elif prev_state in ('file', 'directory', 'hard'):
                try:
                    os.utime(b_path, None)
                except OSError:
                    e = get_exception()
                    module.fail_json(path=path, msg='Error while touching existing target: %s' % to_native(e, nonstring='simplerepr'))
            else:
                module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
            try:
                module.set_fs_attributes_if_different(file_args, True, diff)
            except SystemExit:
                e = get_exception()
                if e.code:
                    # We take this to mean that fail_json() was called from
                    # somewhere in basic.py
                    if prev_state == 'absent':
                        # If we just created the file we can safely remove it
                        os.remove(b_path)
                raise e

        module.exit_json(dest=path, changed=True, diff=diff)

    module.fail_json(path=path, msg='unexpected position reached')
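
# A standalone sketch (not from the module above) of the replace-in-place
# approach used for existing link targets: create the new symlink under a
# temporary name next to the destination, then rename over it. The helper
# name is an assumption.
import os
import time

def replace_symlink_atomically(src, dest):
    tmp = os.path.join(os.path.dirname(dest) or '.',
                       '.%s.%s.tmp' % (os.getpid(), time.time()))
    os.symlink(src, tmp)
    try:
        os.rename(tmp, dest)  # atomic on POSIX filesystems
    except OSError:
        os.unlink(tmp)
        raise

# e.g. replace_symlink_atomically('/srv/app/releases/v2', '/srv/app/current')  # illustrative paths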
Code example #46
0
File: mongodb_parameter.py Project: saran410/Devops
def main():
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default=27017, type='int'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            param=dict(default=None, required=True),
            value=dict(default=None, required=True),
            param_type=dict(default="str", choices=['str', 'int']),
            ssl=dict(default=False, type='bool'),
        )
    )

    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')

    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']

    replica_set = module.params['replica_set']
    ssl = module.params['ssl']

    param = module.params['param']
    param_type = module.params['param_type']
    value = module.params['value']

    # Verify parameter is coherent with specified type
    try:
        if param_type == 'int':
            value = int(value)
    except ValueError:
        e = get_exception()
        module.fail_json(msg="value '%s' is not %s" % (value, param_type))

    try:
        if replica_set:
            client = MongoClient(login_host, int(login_port), replicaset=replica_set, ssl=ssl)
        else:
            client = MongoClient(login_host, int(login_port), ssl=ssl)

        if login_user is None and login_password is None:
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')

        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)

    except ConnectionFailure:
        e = get_exception()
        module.fail_json(msg='unable to connect to database: %s' % str(e))

    db = client.admin

    try:
        after_value = db.command("setParameter", **{param: value})
    except OperationFailure:
        e = get_exception()
        module.fail_json(msg="unable to change parameter: %s" % str(e))

    if "was" not in after_value:
        module.exit_json(changed=True, msg="Unable to determine old value, assume it changed.")
    else:
        module.exit_json(changed=(value != after_value["was"]), before=after_value["was"],
                         after=value)
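
# A direct pymongo illustration (not from the module above) of the
# setParameter call used here; connection details are illustrative and
# assume a local mongod without authentication.
from pymongo import MongoClient

client = MongoClient('localhost', 27017)
result = client.admin.command('setParameter', notablescan=True)
print(result.get('was'))  # the previous value, used above for change detection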
Code example #47
0
def main():

    module = AnsibleModule(argument_spec=dict(
        resource=dict(choices=['topic'], default='topic'
                      ),  # resource managed, more to come (acl,broker)
        name=dict(type='str', required=True),  # resource name
        partitions=dict(
            type='int', required=False, default=0
        ),  # currently required since only resource topic is available
        replica_factor=dict(
            type='int', required=False, default=0
        ),  # currently required since only resource topic is available
        state=dict(choices=['present', 'absent'], default='present'),
        options=dict(required=False, type='dict', default=None),
        zookeeper=dict(type='str', required=False),
        zookeeper_auth_scheme=dict(choices=['digest', 'sasl'],
                                   default='digest'),
        zookeeper_auth_value=dict(
            type='str', no_log=True, required=False, default=''),
        bootstrap_servers=dict(type='str', required=True),
        security_protocol=dict(
            choices=['PLAINTEXT', 'SSL', 'SASL_SSL', 'SASL_PLAINTEXT'],
            default='PLAINTEXT'),
        api_version=dict(type='str', required=True, default=None),
        ssl_check_hostname=dict(default=True, type='bool', required=False),
        ssl_cafile=dict(required=False, default=None, type='path'),
        ssl_certfile=dict(required=False, default=None, type='path'),
        ssl_keyfile=dict(
            required=False, default=None, no_log=True, type='path'),
        ssl_password=dict(type='str', no_log=True, required=False),
        ssl_crlfile=dict(required=False, default=None, type='path'),
        sasl_mechanism=dict(choices=['PLAIN', 'GSSAPI'], default='PLAIN'
                            ),  # only PLAIN is currently available
        sasl_plain_username=dict(type='str', required=False),
        sasl_plain_password=dict(type='str', no_log=True, required=False),
        sasl_kerberos_service_name=dict(type='str', required=False),
    ))

    params = module.params

    resource = params['resource']
    name = params['name']
    partitions = params['partitions']
    replica_factor = params['replica_factor']
    state = params['state']
    zookeeper = params['zookeeper']
    zookeeper_auth_scheme = params['zookeeper_auth_scheme']
    zookeeper_auth_value = params['zookeeper_auth_value']
    bootstrap_servers = params['bootstrap_servers']
    security_protocol = params['security_protocol']
    ssl_check_hostname = params['ssl_check_hostname']
    ssl_cafile = params['ssl_cafile']
    ssl_certfile = params['ssl_certfile']
    ssl_keyfile = params['ssl_keyfile']
    ssl_password = params['ssl_password']
    ssl_crlfile = params['ssl_crlfile']
    sasl_mechanism = params['sasl_mechanism']
    sasl_plain_username = params['sasl_plain_username']
    sasl_plain_password = params['sasl_plain_password']
    sasl_kerberos_service_name = params['sasl_kerberos_service_name']

    api_version = tuple(params['api_version'].strip(".").split("."))

    options = []
    if params['options'] is not None:
        options = params['options'].items()

    ssl_files = {
        'cafile': {
            'path': ssl_cafile,
            'is_temp': False
        },
        'certfile': {
            'path': ssl_certfile,
            'is_temp': False
        },
        'keyfile': {
            'path': ssl_keyfile,
            'is_temp': False
        },
        'crlfile': {
            'path': ssl_crlfile,
            'is_temp': False
        }
    }
    for key, value in ssl_files.items():
        if value['path'] is not None:
            # TODO is that condition sufficient?
            if value['path'].startswith("-----BEGIN"):
                # value is a content, need to create a tempfile
                fd, path = tempfile.mkstemp(prefix=key)
                with os.fdopen(fd, 'w') as tmp:
                    tmp.write(value['path'])
                ssl_files[key]['path'] = path
                ssl_files[key]['is_temp'] = True
            elif not os.path.exists(os.path.dirname(value['path'])):
                # value is not a content, but path does not exist, fails the module
                module.fail_json(
                    msg=
                    '\'%s\' is not a content and provided path does not exist, please check your SSL configuration.'
                    % key)

    zookeeper_auth = []
    if zookeeper_auth_value != '':
        auth = (zookeeper_auth_scheme, zookeeper_auth_value)
        zookeeper_auth.append(auth)

    try:
        manager = KafkaManager(
            module=module,
            bootstrap_servers=bootstrap_servers,
            security_protocol=security_protocol,
            api_version=api_version,
            ssl_check_hostname=ssl_check_hostname,
            ssl_cafile=ssl_files['cafile']['path'],
            ssl_certfile=ssl_files['certfile']['path'],
            ssl_keyfile=ssl_files['keyfile']['path'],
            ssl_password=ssl_password,
            ssl_crlfile=ssl_files['crlfile']['path'],
            sasl_mechanism=sasl_mechanism,
            sasl_plain_username=sasl_plain_username,
            sasl_plain_password=sasl_plain_password,
            sasl_kerberos_service_name=sasl_kerberos_service_name)
    except Exception:
        e = get_exception()
        module.fail_json(msg='Error while initializing Kafka client : %s ' %
                         str(e))

    changed = False

    if parse_version(manager.get_api_version()) < parse_version('0.11.0'):
        module.fail_json(
            msg=
            'Current version of library is not compatible with Kafka < 0.11.0.'
        )

    msg = '%s \'%s\': ' % (resource, name)

    if resource == 'topic':
        if state == 'present':
            if name in manager.get_topics():
                # topic is already there
                if zookeeper != '' and partitions > 0 and replica_factor > 0:
                    try:
                        manager.init_zk_client(zookeeper, zookeeper_auth)
                    except Exception:
                        e = get_exception()
                        module.fail_json(
                            msg=
                            'Error while initializing Zookeeper client : %s. Is your Zookeeper server available and running on \'%s\'?'
                            % (str(e), zookeeper))

                    if manager.is_topic_configuration_need_update(
                            name, options):
                        manager.update_topic_configuration(name, options)
                        changed = True

                    if manager.is_topic_replication_need_update(
                            name, replica_factor):
                        json_assignment = manager.get_assignment_for_replica_factor_update(
                            name, replica_factor)
                        manager.update_admin_assignment(json_assignment)
                        changed = True

                    if manager.is_topic_partitions_need_update(
                            name, partitions):
                        if parse_version(manager.get_api_version()
                                         ) < parse_version('1.0.0'):
                            json_assignment = manager.get_assignment_for_partition_update(
                                name, partitions)
                            zknode = '/brokers/topics/%s' % name
                            manager.update_topic_assignment(
                                json_assignment, zknode)
                        else:
                            manager.update_topic_partitions(name, partitions)
                        changed = True
                    manager.close_zk_client()
                    if changed:
                        msg += 'successfully updated.'
                else:
                    module.fail_json(
                        msg=
                        '\'zookeeper\', \'partitions\' and \'replica_factor\' parameters are needed when parameter \'state\' is \'present\''
                    )
            else:
                # topic is absent
                manager.create_topic(name=name,
                                     partitions=partitions,
                                     replica_factor=replica_factor,
                                     config_entries=options)
                changed = True
                msg += 'successfully created.'
        elif state == 'absent':
            if name in manager.get_topics():
                # delete topic
                manager.delete_topic(name)
                changed = True
                msg += 'successfully deleted.'

    manager.close()
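    # Remove any temporary SSL files this run wrote to disk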
    for key, value in ssl_files.items():
        if value['path'] is not None and value['is_temp'] and os.path.exists(
                value['path']):
            os.remove(value['path'])

    if not changed:
        msg += 'nothing to do.'

    module.exit_json(changed=changed, msg=msg)
Code example #48
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(choices=[
                'started', 'stopped', 'restarted', 'killed', 'reloaded', 'once'
            ]),
            enabled=dict(required=False, type='bool'),
            downed=dict(required=False, type='bool'),
            dist=dict(required=False, default='daemontools'),
            service_dir=dict(required=False, default='/service'),
            service_src=dict(required=False, default='/etc/service'),
        ),
        supports_check_mode=True,
    )

    module.run_command_environ_update = dict(LANG='C',
                                             LC_ALL='C',
                                             LC_MESSAGES='C',
                                             LC_CTYPE='C')

    state = module.params['state']
    enabled = module.params['enabled']
    downed = module.params['downed']

    svc = Svc(module)
    changed = False
    orig_state = svc.report()

    if enabled is not None and enabled != svc.enabled:
        changed = True
        if not module.check_mode:
            try:
                if enabled:
                    svc.enable()
                else:
                    svc.disable()
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change service link: %s" % str(e))

    if state is not None and state != svc.state:
        changed = True
        if not module.check_mode:
            getattr(svc, state[:-2])()

    if downed is not None and downed != svc.downed:
        changed = True
        if not module.check_mode:
            d_file = "%s/down" % svc.svc_full
            try:
                if downed:
                    open(d_file, "a").close()
                else:
                    os.unlink(d_file)
            except (OSError, IOError):
                e = get_exception()
                module.fail_json(msg="Could not change downed file: %s" %
                                 (str(e)))

    module.exit_json(changed=changed, svc=svc.report())
Code example #49
0
File: bigpanda.py  Project: saran410/Devops
def main():

    module = AnsibleModule(
        argument_spec=dict(
            component=dict(required=True, aliases=['name']),
            version=dict(required=True),
            token=dict(required=True, no_log=True),
            state=dict(required=True,
                       choices=['started', 'finished', 'failed']),
            hosts=dict(required=False,
                       default=[socket.gethostname()],
                       aliases=['host']),
            env=dict(required=False),
            owner=dict(required=False),
            description=dict(required=False),
            message=dict(required=False),
            source_system=dict(required=False, default='ansible'),
            validate_certs=dict(default='yes', type='bool'),
            url=dict(required=False, default='https://api.bigpanda.io'),
        ),
        supports_check_mode=True,
        check_invalid_arguments=False,
    )

    token = module.params['token']
    state = module.params['state']
    url = module.params['url']

    # Build the common request body
    body = dict()
    for k in ('component', 'version', 'hosts'):
        v = module.params[k]
        if v is not None:
            body[k] = v

    if not isinstance(body['hosts'], list):
        body['hosts'] = [body['hosts']]

    # Insert state-specific attributes to body
    if state == 'started':
        for k in ('source_system', 'env', 'owner', 'description'):
            v = module.params[k]
            if v is not None:
                body[k] = v

        request_url = url + '/data/events/deployments/start'
    else:
        message = module.params['message']
        if message is not None:
            body['errorMessage'] = message

        if state == 'finished':
            body['status'] = 'success'
        else:
            body['status'] = 'failure'

        request_url = url + '/data/events/deployments/end'

    # Build the deployment object we return
    deployment = dict(token=token, url=url)
    deployment.update(body)
    if 'errorMessage' in deployment:
        message = deployment.pop('errorMessage')
        deployment['message'] = message

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True, **deployment)

    # Send the data to bigpanda
    data = json.dumps(body)
    headers = {
        'Authorization': 'Bearer %s' % token,
        'Content-Type': 'application/json'
    }
    try:
        response, info = fetch_url(module,
                                   request_url,
                                   data=data,
                                   headers=headers)
        if info['status'] == 200:
            module.exit_json(changed=True, **deployment)
        else:
            module.fail_json(msg=json.dumps(info))
    except Exception:
        e = get_exception()
        module.fail_json(msg=str(e))
Code example #50
0
def main():

    module = AnsibleModule(argument_spec=dict(
        user=dict(required=True),
        password=dict(required=True, no_log=True),
        to=dict(required=True),
        msg=dict(required=True),
        host=dict(required=False),
        port=dict(required=False, default=5222),
        encoding=dict(required=False),
    ),
                           supports_check_mode=True)

    if not HAS_XMPP:
        module.fail_json(
            msg="The required python xmpp library (xmpppy) is not installed")

    jid = xmpp.JID(module.params['user'])
    user = jid.getNode()
    server = jid.getDomain()
    port = module.params['port']
    password = module.params['password']
    try:
        to, nick = module.params['to'].split('/', 1)
    except ValueError:
        to, nick = module.params['to'], None

    if module.params['host']:
        host = module.params['host']
    else:
        host = server
    if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']

    msg = xmpp.protocol.Message(body=module.params['msg'])

    try:
        conn = xmpp.Client(server, debug=[])
        if not conn.connect(server=(host, port)):
            module.fail_json(rc=1,
                             msg='Failed to connect to server: %s' % (server))
        if not conn.auth(user, password, 'Ansible'):
            module.fail_json(rc=1,
                             msg='Failed to authorize %s on: %s' %
                             (user, server))
        # some old servers require this, also the sleep following send
        conn.sendInitPresence(requestRoster=0)

        if nick:  # sending to room instead of user, need to join
            msg.setType('groupchat')
            msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
            conn.send(xmpp.Presence(to=module.params['to']))
            time.sleep(1)
        else:
            msg.setType('chat')

        msg.setTo(to)
        if not module.check_mode:
            conn.send(msg)
        time.sleep(1)
        conn.disconnect()
    except Exception:
        e = get_exception()
        module.fail_json(msg="unable to send msg: %s" % e)

    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
Code example #51
0
def main():

    # Load RHSM configuration from file
    rhsm = Rhsm(None)

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent']),
            username=dict(default=None, required=False),
            password=dict(default=None, required=False, no_log=True),
            server_hostname=dict(
                default=rhsm.config.get_option('server.hostname'),
                required=False),
            server_insecure=dict(
                default=rhsm.config.get_option('server.insecure'),
                required=False),
            rhsm_baseurl=dict(default=rhsm.config.get_option('rhsm.baseurl'),
                              required=False),
            autosubscribe=dict(default=False, type='bool'),
            activationkey=dict(default=None, required=False),
            org_id=dict(default=None, required=False),
            environment=dict(default=None, required=False, type='str'),
            pool=dict(default='^$', required=False, type='str'),
            consumer_type=dict(default=None, required=False),
            consumer_name=dict(default=None, required=False),
            consumer_id=dict(default=None, required=False),
            force_register=dict(default=False, type='bool'),
        ),
        required_together=[['username', 'password'],
                           ['activationkey', 'org_id']],
        mutually_exclusive=[['username', 'activationkey']],
        required_if=[['state', 'present', ['username', 'activationkey'],
                      True]],
    )

    rhsm.module = module
    state = module.params['state']
    username = module.params['username']
    password = module.params['password']
    server_hostname = module.params['server_hostname']
    server_insecure = module.params['server_insecure']
    rhsm_baseurl = module.params['rhsm_baseurl']
    autosubscribe = module.params['autosubscribe']
    activationkey = module.params['activationkey']
    org_id = module.params['org_id']
    environment = module.params['environment']
    pool = module.params['pool']
    consumer_type = module.params["consumer_type"]
    consumer_name = module.params["consumer_name"]
    consumer_id = module.params["consumer_id"]
    force_register = module.params["force_register"]

    global SUBMAN_CMD
    SUBMAN_CMD = module.get_bin_path('subscription-manager', True)

    # Ensure system is registered
    if state == 'present':

        # Register system
        if rhsm.is_registered and not force_register:
            if pool != '^$':
                try:
                    result = rhsm.update_subscriptions(pool)
                except Exception:
                    e = get_exception()
                    module.fail_json(
                        msg="Failed to update subscriptions for '%s': %s" %
                        (server_hostname, e))
                else:
                    module.exit_json(**result)
            else:
                module.exit_json(changed=False,
                                 msg="System already registered.")
        else:
            try:
                rhsm.enable()
                rhsm.configure(**module.params)
                rhsm.register(username, password, autosubscribe, activationkey,
                              org_id, consumer_type, consumer_name,
                              consumer_id, force_register, environment,
                              rhsm_baseurl, server_insecure)
                subscribed_pool_ids = rhsm.subscribe(pool)
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failed to register with '%s': %s" %
                                 (server_hostname, e))
            else:
                module.exit_json(
                    changed=True,
                    msg="System successfully registered to '%s'." %
                    server_hostname,
                    subscribed_pool_ids=subscribed_pool_ids)
    # Ensure system is *not* registered
    if state == 'absent':
        if not rhsm.is_registered:
            module.exit_json(changed=False, msg="System already unregistered.")
        else:
            try:
                rhsm.unsubscribe()
                rhsm.unregister()
            except Exception:
                e = get_exception()
                module.fail_json(msg="Failed to unregister: %s" % e)
            else:
                module.exit_json(
                    changed=True,
                    msg="System successfully unregistered from %s." %
                    server_hostname)
Code example #52
0
File: sns.py  Project: sharma220/ansible-1
def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            msg=dict(type='str', required=True, aliases=['default']),
            subject=dict(type='str', default=None),
            topic=dict(type='str', required=True),
            email=dict(type='str', default=None),
            sqs=dict(type='str', default=None),
            sms=dict(type='str', default=None),
            http=dict(type='str', default=None),
            https=dict(type='str', default=None),
        ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    msg = module.params['msg']
    subject = module.params['subject']
    topic = module.params['topic']
    email = module.params['email']
    sqs = module.params['sqs']
    sms = module.params['sms']
    http = module.params['http']
    https = module.params['https']

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")
    try:
        connection = connect_to_aws(boto.sns, region, **aws_connect_params)
    except boto.exception.NoAuthHandlerFound:
        e = get_exception()
        module.fail_json(msg=str(e))

    # .publish() takes full ARN topic id, but I'm lazy and type shortnames
    # so do a lookup (topics cannot contain ':', so that's the decider)
    if ':' in topic:
        arn_topic = topic
    else:
        arn_topic = arn_topic_lookup(connection, topic)

    if not arn_topic:
        module.fail_json(msg='Could not find topic: {}'.format(topic))

    dict_msg = {'default': msg}
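    # Per-protocol overrides: with message_structure='json', SNS delivers the
    # value matching each subscription's protocol and falls back to 'default'.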
    if email:
        dict_msg.update(email=email)
    if sqs:
        dict_msg.update(sqs=sqs)
    if sms:
        dict_msg.update(sms=sms)
    if http:
        dict_msg.update(http=http)
    if https:
        dict_msg.update(https=https)

    json_msg = json.dumps(dict_msg)
    try:
        connection.publish(topic=arn_topic,
                           subject=subject,
                           message_structure='json',
                           message=json_msg)
    except boto.exception.BotoServerError:
        e = get_exception()
        module.fail_json(msg=str(e))

    module.exit_json(msg="OK")
Code example #53
0
File: assemble.py  Project: saran410/Devops
def main():

    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            src=dict(required=True, type='path'),
            delimiter=dict(required=False),
            dest=dict(required=True, type='path'),
            backup=dict(default=False, type='bool'),
            remote_src=dict(default=False, type='bool'),
            regexp=dict(required=False),
            ignore_hidden=dict(default=False, type='bool'),
            validate=dict(required=False, type='str'),
        ),
        add_file_common_args=True)

    changed = False
    path_hash = None
    dest_hash = None
    src = module.params['src']
    dest = module.params['dest']
    backup = module.params['backup']
    delimiter = module.params['delimiter']
    regexp = module.params['regexp']
    compiled_regexp = None
    ignore_hidden = module.params['ignore_hidden']
    validate = module.params.get('validate', None)

    result = dict(src=src, dest=dest)
    if not os.path.exists(src):
        module.fail_json(msg="Source (%s) does not exist" % src)

    if not os.path.isdir(src):
        module.fail_json(msg="Source (%s) is not a directory" % src)

    if regexp is not None:
        try:
            compiled_regexp = re.compile(regexp)
        except re.error:
            e = get_exception()
            module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (e, regexp))

    if validate and "%s" not in validate:
        module.fail_json(msg="validate must contain %%s: %s" % validate)

    path = assemble_from_fragments(src, delimiter, compiled_regexp,
                                   ignore_hidden)
    path_hash = module.sha1(path)
    result['checksum'] = path_hash

    # Backwards compat.  This won't return data if FIPS mode is active
    try:
        pathmd5 = module.md5(path)
    except ValueError:
        pathmd5 = None
    result['md5sum'] = pathmd5

    if os.path.exists(dest):
        dest_hash = module.sha1(dest)

    if path_hash != dest_hash:
        if validate:
            (rc, out, err) = module.run_command(validate % path)
            result['validation'] = dict(rc=rc, stdout=out, stderr=err)
            if rc != 0:
                cleanup(path)
                module.fail_json(msg="failed to validate: rc:%s error:%s" %
                                 (rc, err))
        if backup and dest_hash is not None:
            result['backup_file'] = module.backup_local(dest)

        module.atomic_move(path,
                           dest,
                           unsafe_writes=module.params['unsafe_writes'])
        changed = True

    cleanup(path, result)

    # handle file permissions
    file_args = module.load_file_common_arguments(module.params)
    result['changed'] = module.set_fs_attributes_if_different(
        file_args, changed)

    # Mission complete
    result['msg'] = "OK"
    module.exit_json(**result)
Code example #54
0
File: pamd.py  Project: tigerxjtu/ansible
def main():

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True, type='str'),
            type=dict(required=True,
                      choices=['account', 'auth',
                               'password', 'session']),
            control=dict(required=True, type='str'),
            module_path=dict(required=True, type='str'),
            new_type=dict(required=False,
                          choices=['account', 'auth',
                                   'password', 'session']),
            new_control=dict(required=False, type='str'),
            new_module_path=dict(required=False, type='str'),
            module_arguments=dict(required=False, type='list'),
            state=dict(required=False, default="updated",
                       choices=['before', 'after', 'updated',
                                'args_absent', 'args_present']),
            path=dict(required=False, default='/etc/pam.d', type='str')
        ),
        supports_check_mode=True
    )

    service = module.params['name']
    old_type = module.params['type']
    old_control = module.params['control']
    old_module_path = module.params['module_path']

    new_type = module.params['new_type']
    new_control = module.params['new_control']
    new_module_path = module.params['new_module_path']

    module_arguments = module.params['module_arguments']
    state = module.params['state']

    path = module.params['path']

    pamd = PamdService(path, service, module)

    old_rule = PamdRule(old_type,
                        old_control,
                        old_module_path)
    new_rule = PamdRule(new_type,
                        new_control,
                        new_module_path,
                        module_arguments)

    try:
        if state == 'updated':
            change, result = update_rule(pamd,
                                         old_rule,
                                         new_rule)
        elif state == 'before':
            if (new_rule.rule_control is None or
                    new_rule.rule_type is None or
                    new_rule.rule_module_path is None):

                module.fail_json(msg='When inserting a new rule before ' +
                                 'or after an existing rule, new_type, ' +
                                 'new_control and new_module_path must ' +
                                 'all be set.')
            change, result = insert_before_rule(pamd,
                                                old_rule,
                                                new_rule)
        elif state == 'after':
            if (new_rule.rule_control is None or
                    new_rule.rule_type is None or
                    new_rule.rule_module_path is None):

                module.fail_json(msg='When inserting a new rule before ' +
                                 'or after an existing rule, new_type, ' +
                                 'new_control and new_module_path must ' +
                                 'all be set.')
            change, result = insert_after_rule(pamd,
                                               old_rule,
                                               new_rule)
        elif state == 'args_absent':
            change, result = remove_module_arguments(pamd,
                                                     old_rule,
                                                     module_arguments)
        elif state == 'args_present':
            change, result = add_module_arguments(pamd,
                                                  old_rule,
                                                  module_arguments)

        if not module.check_mode:
            write_rules(pamd)

    except Exception:
        e = get_exception()
        module.fail_json(msg='error while changing pamd: %s' % str(e))
    facts = {}
    facts['pamd'] = {'changed': change, 'result': result}

    module.params['dest'] = pamd.fname

    module.exit_json(changed=change, ansible_facts=facts)
Code example #55
0
File: winrm.py  Project: stacywsmith/ansible
from ansible.compat.six.moves.urllib.parse import urlunsplit
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.pycompat24 import get_exception
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import exec_wrapper, become_wrapper, leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe

try:
    import winrm
    from winrm import Response
    from winrm.protocol import Protocol
except ImportError:
    e = get_exception()
    raise AnsibleError("winrm or requests is not installed: %s" % str(e))

try:
    import xmltodict
except ImportError:
    e = get_exception()
    raise AnsibleError("xmltodict is not installed: %s" % str(e))

try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()

Code example #56
0
File: ldap_attr.py  Project: tbendiksen/ansible
def main():
    module = AnsibleModule(
        argument_spec={
            'bind_dn': dict(default=None),
            'bind_pw': dict(default='', no_log=True),
            'dn': dict(required=True),
            'name': dict(required=True),
            'params': dict(type='dict'),
            'server_uri': dict(default='ldapi:///'),
            'start_tls': dict(default=False, type='bool'),
            'state': dict(default='present',
                          choices=['present', 'absent', 'exact']),
            'values': dict(required=True, type='raw'),
        },
        supports_check_mode=True,
    )

    if not HAS_LDAP:
        module.fail_json(
            msg="Missing required 'ldap' module (pip install python-ldap)")

    # Update module parameters with user's parameters if defined
    if 'params' in module.params and isinstance(module.params['params'], dict):
        module.params.update(module.params['params'])
        # Remove the params
        module.params.pop('params', None)

    # Instantiate the LdapAttr object
    ldap = LdapAttr(module)

    state = module.params['state']

    # Perform action
    if state == 'present':
        modlist = ldap.add()
    elif state == 'absent':
        modlist = ldap.delete()
    elif state == 'exact':
        modlist = ldap.exact()

    changed = False

    if len(modlist) > 0:
        changed = True

        if not module.check_mode:
            try:
                ldap.connection.modify_s(ldap.dn, modlist)
            except Exception:
                e = get_exception()
                module.fail_json(msg="Attribute action failed.",
                                 details=str(e))

    module.exit_json(changed=changed, modlist=modlist)
Code example #57
0
def main():
    module = AnsibleModule(argument_spec=dict(
        login_user=dict(default=None),
        login_password=dict(default=None),
        login_host=dict(default='localhost'),
        login_port=dict(default='27017'),
        login_database=dict(default=None),
        replica_set=dict(default=None),
        database=dict(required=True, aliases=['db']),
        name=dict(required=True, aliases=['user']),
        password=dict(aliases=['pass']),
        ssl=dict(default=False, type='bool'),
        roles=dict(default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
        update_password=dict(default="always", choices=["always",
                                                        "on_create"]),
        ssl_cert_reqs=dict(
            default='CERT_REQUIRED',
            choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
    ),
                           supports_check_mode=True)

    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')

    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']

    replica_set = module.params['replica_set']
    db_name = module.params['database']
    user = module.params['name']
    password = module.params['password']
    ssl = module.params['ssl']
    ssl_cert_reqs = None
    roles = module.params['roles'] or []
    state = module.params['state']
    update_password = module.params['update_password']

    try:
        connection_params = {
            "host": login_host,
            "port": int(login_port),
        }

        if replica_set:
            connection_params["replicaset"] = replica_set

        if ssl:
            connection_params["ssl"] = ssl
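            # Translate the ssl_cert_reqs choice (e.g. 'CERT_REQUIRED') into
            # the matching constant from the ssl library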
            connection_params["ssl_cert_reqs"] = getattr(
                ssl_lib, module.params['ssl_cert_reqs'])

        client = MongoClient(**connection_params)

        # NOTE: this check must be done ASAP.
        # We don't need to be authenticated.
        check_compatibility(module, client)

        if login_user is None and login_password is None:
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            module.fail_json(
                msg=
                'when supplying login arguments, both login_user and login_password must be provided'
            )

        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user,
                                      login_password,
                                      source=login_database)
        elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
            if db_name != "admin":
                module.fail_json(
                    msg=
                    'The localhost login exception only allows the first admin account to be created'
                )
            #else: this has to be the first admin user added

    except Exception:
        e = get_exception()
        module.fail_json(msg='unable to connect to database: %s' % str(e))

    if state == 'present':
        if password is None and update_password == 'always':
            module.fail_json(
                msg=
                'password parameter required when adding a user unless update_password is set to on_create'
            )

        try:
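            # Look up any existing user; when the password is not being
            # forcibly updated and the roles already match, report no change.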
            uinfo = user_find(client, user, db_name)
            if update_password != 'always' and uinfo:
                password = None
                if not check_if_roles_changed(uinfo, roles, db_name):
                    module.exit_json(changed=False, user=user)

            if module.check_mode:
                module.exit_json(changed=True, user=user)

            user_add(module, client, db_name, user, password, roles)
        except Exception:
            e = get_exception()
            module.fail_json(msg='Unable to add or update user: %s' % str(e))

            # Here we can check for a password change once mongo provides a query for that: https://jira.mongodb.org/browse/SERVER-22848
            #newuinfo = user_find(client, user, db_name)
            #if uinfo['role'] == newuinfo['role'] and CheckPasswordHere:
            #    module.exit_json(changed=False, user=user)

    elif state == 'absent':
        try:
            user_remove(module, client, db_name, user)
        except Exception:
            e = get_exception()
            module.fail_json(msg='Unable to remove user: %s' % str(e))

    module.exit_json(changed=True, user=user)
Code example #58
0
def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,
           disable_gpg_check, exclude, repoq, skip_broken, installroot='/'):

    # fedora will redirect yum to dnf, which has incompatibilities
    # with how this module expects yum to operate. If yum-deprecated
    # is available, use that instead to emulate the old behaviors.
    if module.get_bin_path('yum-deprecated'):
        yumbin = module.get_bin_path('yum-deprecated')
    else:
        yumbin = module.get_bin_path('yum')

    # need debug level 2 to get 'Nothing to do' for groupinstall.
    yum_basecmd = [yumbin, '-d', '2', '-y']

    if conf_file and os.path.exists(conf_file):
        yum_basecmd += ['-c', conf_file]
        if repoq:
            repoq += ['-c', conf_file]

    dis_repos = []
    en_repos = []

    if skip_broken:
        yum_basecmd.extend(['--skip-broken'])

    if disablerepo:
        dis_repos = disablerepo.split(',')
        r_cmd = ['--disablerepo=%s' % disablerepo]
        yum_basecmd.extend(r_cmd)
    if enablerepo:
        en_repos = enablerepo.split(',')
        r_cmd = ['--enablerepo=%s' % enablerepo]
        yum_basecmd.extend(r_cmd)

    if exclude:
        e_cmd = ['--exclude=%s' % exclude]
        yum_basecmd.extend(e_cmd)

    if installroot != '/':
        # do not setup installroot by default, because of error
        # CRITICAL:yum.cli:Config Error: Error accessing file for config file:////etc/yum.conf
        # in old yum version (like in CentOS 6.6)
        e_cmd = ['--installroot=%s' % installroot]
        yum_basecmd.extend(e_cmd)

    if state in ['installed', 'present', 'latest']:
        """ The need for this entire if conditional has to be challenged;
            this function is the ensure function that is called
            in the main section.

            This conditional disables/enables repos for the
            install/present/latest actions; the same could actually
            be done for the remove and absent actions.

            As a solution I would advise calling
            try: my.repos.disableRepo(disablerepo)
            and
            try: my.repos.enableRepo(enablerepo)
            right before any yum_cmd is actually called, regardless
            of the yum action.

            Please note that enablerepo/disablerepo are general
            options; this means that we can pass them with any action.
            https://linux.die.net/man/8/yum

            This docstring will be removed once issue #21619
            is solved.

            This has been triggered by: #19587
        """

        if module.params.get('update_cache'):
            module.run_command(yum_basecmd + ['clean', 'expire-cache'])

        my = yum_base(conf_file, installroot)
        try:
            if disablerepo:
                my.repos.disableRepo(disablerepo)
            current_repos = my.repos.repos.keys()
            if enablerepo:
                try:
                    my.repos.enableRepo(enablerepo)
                    new_repos = my.repos.repos.keys()
                    for i in new_repos:
                        if i not in current_repos:
                            rid = my.repos.getRepo(i)
                            a = rid.repoXML.repoid # nopep8 - https://github.com/ansible/ansible/pull/21475#pullrequestreview-22404868
                    current_repos = new_repos
                except yum.Errors.YumBaseError:
                    e = get_exception()
                    module.fail_json(msg="Error setting/accessing repos: %s" % (e))
        except yum.Errors.YumBaseError:
            e = get_exception()
            module.fail_json(msg="Error accessing repos: %s" % e)
    if state in ['installed', 'present']:
        if disable_gpg_check:
            yum_basecmd.append('--nogpgcheck')
        res = install(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot=installroot)
    elif state in ['removed', 'absent']:
        res = remove(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot=installroot)
    elif state == 'latest':
        if disable_gpg_check:
            yum_basecmd.append('--nogpgcheck')
        res = latest(module, pkgs, repoq, yum_basecmd, conf_file, en_repos, dis_repos, installroot=installroot)
    else:
        # should be caught by AnsibleModule argument_spec
        module.fail_json(msg="we should never get here unless this all"
                " failed", changed=False, results='', errors='unexpected state')
    return res
Code example #59
0
def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='list', required=True),
            format=dict(choices=['gz', 'bz2', 'zip', 'tar'],
                        default='gz',
                        required=False),
            dest=dict(required=False, type='path'),
            remove=dict(required=False, default=False, type='bool'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    check_mode = module.check_mode
    paths = params['path']
    dest = params['dest']
    remove = params['remove']

    expanded_paths = []
    format = params['format']
    globby = False
    changed = False
    state = 'absent'

    # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
    archive = False
    successes = []

    for i, path in enumerate(paths):
        path = os.path.expanduser(os.path.expandvars(path))

        # Expand any glob characters. If found, add the expanded glob to the
        # list of expanded_paths, which might be empty.
        if ('*' in path or '?' in path):
            expanded_paths = expanded_paths + glob.glob(path)
            globby = True

        # If there are no glob characters the path is added to the expanded paths
        # whether the path exists or not
        else:
            expanded_paths.append(path)

    if len(expanded_paths) == 0:
        return module.fail_json(path=', '.join(paths),
                                expanded_paths=', '.join(expanded_paths),
                                msg='Error, no source paths were found')

    # If we actually matched multiple files or TRIED to, then
    # treat this as a multi-file archive
    archive = globby or os.path.isdir(
        expanded_paths[0]) or len(expanded_paths) > 1

    # Default created file name (for single-file archives) to
    # <file>.<format>
    if not dest and not archive:
        dest = '%s.%s' % (expanded_paths[0], format)

    # Force archives to specify 'dest'
    if archive and not dest:
        module.fail_json(
            dest=dest,
            path=', '.join(paths),
            msg=
            'Error, must specify "dest" when archiving multiple files or trees'
        )

    archive_paths = []
    missing = []
    arcroot = ''

    for path in expanded_paths:
        # Use the longest common directory name among all the files
        # as the archive root path
        if arcroot == '':
            arcroot = os.path.dirname(path) + os.sep
        else:
            for i in range(len(arcroot)):
                if path[i] != arcroot[i]:
                    break

            if i < len(arcroot):
                arcroot = os.path.dirname(arcroot[0:i + 1])

            arcroot += os.sep

        # Don't allow archives to be created anywhere within paths to be removed
        if remove and os.path.isdir(path) and dest.startswith(path):
            module.fail_json(
                path=', '.join(paths),
                msg=
                'Error, created archive can not be contained in source paths when remove=True'
            )

        if os.path.lexists(path):
            archive_paths.append(path)
        else:
            missing.append(path)

    # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
    if len(missing) == len(expanded_paths) and dest and os.path.exists(dest):
        # Just check the filename to know if it's an archive or simple compressed file
        if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.zip)$',
                     os.path.basename(dest), re.IGNORECASE):
            state = 'archive'
        else:
            state = 'compress'

    # Multiple files, or globbiness
    elif archive:
        if len(archive_paths) == 0:
            # No source files were found, but the archive is there.
            if os.path.lexists(dest):
                state = 'archive'
        elif len(missing) > 0:
            # SOME source files were found, but not all of them
            state = 'incomplete'

        archive = None
        size = 0
        errors = []

        if os.path.lexists(dest):
            size = os.path.getsize(dest)

        if state != 'archive':
            if check_mode:
                changed = True

            else:
                try:
                    # Slightly more difficult (and less efficient!) compression using zipfile module
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w',
                                                  zipfile.ZIP_DEFLATED)

                    # Easier compression using tarfile module
                    elif format == 'gz' or format == 'bz2':
                        arcfile = tarfile.open(dest, 'w|' + format)

                    # Or plain tar archiving
                    elif format == 'tar':
                        arcfile = tarfile.open(dest, 'w')

                    match_root = re.compile('^%s' % re.escape(arcroot))
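                    # Strip the common root from each member name so entries
                    # are stored relative to arcroot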
                    for path in archive_paths:
                        if os.path.isdir(path):
                            # Recurse into directories
                            for dirpath, dirnames, filenames in os.walk(
                                    path, topdown=True):
                                if not dirpath.endswith(os.sep):
                                    dirpath += os.sep

                                for dirname in dirnames:
                                    fullpath = dirpath + dirname
                                    arcname = match_root.sub('', fullpath)

                                    try:
                                        if format == 'zip':
                                            arcfile.write(fullpath, arcname)
                                        else:
                                            arcfile.add(fullpath,
                                                        arcname,
                                                        recursive=False)

                                    except Exception:
                                        e = get_exception()
                                        errors.append('%s: %s' %
                                                      (fullpath, str(e)))

                                for filename in filenames:
                                    fullpath = dirpath + filename
                                    arcname = match_root.sub('', fullpath)

                                    if not filecmp.cmp(fullpath, dest):
                                        try:
                                            if format == 'zip':
                                                arcfile.write(
                                                    fullpath, arcname)
                                            else:
                                                arcfile.add(fullpath,
                                                            arcname,
                                                            recursive=False)

                                            successes.append(fullpath)
                                        except Exception:
                                            e = get_exception()
                                            errors.append('Adding %s: %s' %
                                                          (fullpath, str(e)))
                        else:
                            if format == 'zip':
                                arcfile.write(path, match_root.sub('', path))
                            else:
                                arcfile.add(path,
                                            match_root.sub('', path),
                                            recursive=False)

                            successes.append(path)

                except Exception:
                    e = get_exception()
                    return module.fail_json(
                        msg='Error when writing %s archive at %s: %s' %
                        (format == 'zip' and 'zip' or ('tar.' + format), dest,
                         str(e)))

                if arcfile:
                    arcfile.close()
                    state = 'archive'

                if len(errors) > 0:
                    module.fail_json(
                        msg='Errors when writing archive at %s: %s' %
                        (dest, '; '.join(errors)))

        if state in ['archive', 'incomplete'] and remove:
            for path in successes:
                try:
                    if os.path.isdir(path):
                        shutil.rmtree(path)
                    elif not check_mode:
                        os.remove(path)
                except OSError:
                    e = get_exception()
                    errors.append(path)

            if len(errors) > 0:
                module.fail_json(dest=dest,
                                 msg='Error deleting some source files: ' +
                                 str(e),
                                 files=errors)

        # Rudimentary check: If size changed then file changed. Not perfect, but easy.
        if os.path.getsize(dest) != size:
            changed = True

        if len(successes) and state != 'incomplete':
            state = 'archive'

    # Simple, single-file compression
    else:
        path = expanded_paths[0]

        # No source or compressed file
        if not (os.path.exists(path) or os.path.lexists(dest)):
            state = 'absent'

        # if it already exists and the source file isn't there, consider this done
        elif not os.path.lexists(path) and os.path.lexists(dest):
            state = 'compress'

        else:
            if module.check_mode:
                if not os.path.exists(dest):
                    changed = True
            else:
                size = 0
                f_in = f_out = arcfile = None

                if os.path.lexists(dest):
                    size = os.path.getsize(dest)

                try:
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w',
                                                  zipfile.ZIP_DEFLATED)
                        arcfile.write(path, path[len(arcroot):])
                        arcfile.close()
                        state = 'archive'  # because all zip files are archives

                    else:
                        f_in = open(path, 'rb')

                        if format == 'gz':
                            f_out = gzip.open(dest, 'wb')
                        elif format == 'bz2':
                            f_out = bz2.BZ2File(dest, 'wb')
                        else:
                            raise OSError("Invalid format")

                        shutil.copyfileobj(f_in, f_out)

                    successes.append(path)

                except OSError:
                    e = get_exception()
                    module.fail_json(
                        path=path,
                        dest=dest,
                        msg='Unable to write to compressed file: %s' % str(e))

                if arcfile:
                    arcfile.close()
                if f_in:
                    f_in.close()
                if f_out:
                    f_out.close()

                # Rudimentary check: If size changed then file changed. Not perfect, but easy.
                if os.path.getsize(dest) != size:
                    changed = True

            state = 'compress'

        if remove and not check_mode:
            try:
                os.remove(path)

            except OSError:
                e = get_exception()
                module.fail_json(path=path,
                                 msg='Unable to remove source file: %s' %
                                 str(e))

    params['path'] = dest
    file_args = module.load_file_common_arguments(params)

    changed = module.set_fs_attributes_if_different(file_args, changed)

    module.exit_json(archived=successes,
                     dest=dest,
                     changed=changed,
                     state=state,
                     arcroot=arcroot,
                     missing=missing,
                     expanded_paths=expanded_paths)
Code example #60
0
File: vsphere_copy.py  Project: giouchiha/ansible-1
def main():

    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, aliases=['hostname']),
            login=dict(required=True, aliases=['username']),
            password=dict(required=True, no_log=True),
            src=dict(required=True, aliases=['name']),
            datacenter=dict(required=True),
            datastore=dict(required=True),
            dest=dict(required=True, aliases=['path']),
            validate_certs=dict(required=False, default=True, type='bool'),
        ),
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode=False,
    )

    host = module.params.get('host')
    login = module.params.get('login')
    password = module.params.get('password')
    src = module.params.get('src')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    dest = module.params.get('dest')
    validate_certs = module.params.get('validate_certs')

    fd = open(src, "rb")
    atexit.register(fd.close)

    data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
    atexit.register(data.close)

    remote_path = vmware_path(datastore, datacenter, dest)
    url = 'https://%s%s' % (host, remote_path)

    headers = {
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    }

    try:
        r = open_url(url,
                     data=data,
                     headers=headers,
                     method='PUT',
                     url_username=login,
                     url_password=password,
                     validate_certs=validate_certs,
                     force_basic_auth=True)
    except socket.error:
        e = get_exception()
        if isinstance(e.args, tuple) and e[0] == errno.ECONNRESET:
            # VSphere resets connection if the file is in use and cannot be replaced
            module.fail_json(msg='Failed to upload, image probably in use',
                             status=None,
                             errno=e[0],
                             reason=str(e),
                             url=url)
        else:
            module.fail_json(msg=str(e),
                             status=None,
                             errno=e[0],
                             reason=str(e),
                             url=url)
    except Exception:
        e = get_exception()
        error_code = -1
        try:
            if isinstance(e[0], int):
                error_code = e[0]
        except KeyError:
            pass
        module.fail_json(msg=str(e),
                         status=None,
                         errno=error_code,
                         reason=str(e),
                         url=url)

    status = r.getcode()
    if 200 <= status < 300:
        module.exit_json(changed=True, status=status, reason=r.msg, url=url)
    else:
        length = r.headers.get('content-length', None)
        if r.headers.get('transfer-encoding', '').lower() == 'chunked':
            chunked = 1
        else:
            chunked = 0

        module.fail_json(msg='Failed to upload',
                         errno=None,
                         status=status,
                         reason=r.msg,
                         length=length,
                         headers=dict(r.headers),
                         chunked=chunked,
                         url=url)