Example #1
0
 def get_host(self, macaddr):
     """Query the DHCP server over OMAPI for a host entry matching *macaddr*.

     Returns the OMAPI response message when the server answers with an
     update opcode, otherwise None (host not found).
     """
     query = OmapiMessage.open(to_bytes("host", errors='surrogate_or_strict'))
     query.obj.append((to_bytes("hardware-address", errors='surrogate_or_strict'), pack_mac(macaddr)))
     # hardware-type 1 presumably selects Ethernet -- confirm against OMAPI docs.
     query.obj.append((to_bytes("hardware-type", errors='surrogate_or_strict'), struct.pack("!I", 1)))
     reply = self.omapi.query_server(query)
     return reply if reply.opcode == OMAPI_OP_UPDATE else None
def check_path(path, is_dir=False):
    """Print a diagnostic for each illegal name, end-char, or byte found in *path*."""
    type_name = 'directory' if is_dir else 'file'
    file_name = os.path.split(path)[1]
    name = os.path.splitext(file_name)[0]

    if name.upper() in ILLEGAL_NAMES:
        print("%s: illegal %s name %s" % (path, type_name, name.upper()))

    last_char = file_name[-1]
    if last_char in ILLEGAL_END_CHARS:
        print("%s: illegal %s name end-char '%s'" % (path, type_name, last_char))

    name_bytes = to_bytes(file_name, encoding='utf-8')
    for char in ILLEGAL_CHARS:
        if char in name_bytes:
            path_bytes = to_bytes(path, encoding='utf-8')
            print("%s: illegal char '%s' in %s name" % (path_bytes, char, type_name))
def check_path(path, dir=False):
    """Print a message for each disallowed name or character in *path*.

    NOTE(review): the ``dir`` parameter shadows the builtin of the same name;
    it is kept unchanged to preserve the call signature.
    """
    if dir:
        type_name = 'directory'
    else:
        type_name = 'file'
    file_name = os.path.split(path)[-1]
    name = os.path.splitext(file_name)[0]

    if name.upper() in ILLEGAL_NAMES:
        print("%s: illegal %s name %s" % (path, type_name, name.upper()))

    if file_name[-1] in ILLEGAL_END_CHARS:
        print("%s: illegal %s name end-char '%s'" % (path, type_name, file_name[-1]))

    encoded_name = to_bytes(file_name, encoding='utf-8')
    for char in ILLEGAL_CHARS:
        if char in encoded_name:
            encoded_path = to_bytes(path, encoding='utf-8')
            print("%s: illegal char '%s' in %s name" % (encoded_path, char, type_name))
Example #4
0
    def logout(self):
        """Invalidate the current SEPM session, if one exists, and drop the cached token."""
        auth = self.connection._auth
        if auth is None:
            return
        self.send_request("POST",
                          "/sepm/api/v1/identity/logout",
                          data=to_bytes(json.dumps(auth)))
        # Clear the cached token so later requests must re-authenticate.
        self.connection._auth = None
Example #5
0
 def _write_password_to_pipe(proc):
     """Feed the rsync password to the sshpass child through the shared pipe."""
     # Close the read end; we only ever write from this side.
     os.close(_sshpass_pipe[0])
     try:
         os.write(_sshpass_pipe[1], to_bytes(rsync_password) + b'\n')
     except OSError as err:
         # A broken pipe is acceptable when sshpass has already exited;
         # any other error (or a still-running sshpass) is real.
         if err.errno == errno.EPIPE and proc.poll() is not None:
             return
         raise
Example #6
0
def check_path(path, is_dir=False):
    """Check the specified path for unwanted characters and names."""
    kind = 'directory' if is_dir else 'file'
    trimmed = path.rstrip(os.path.sep)
    file_name = os.path.basename(trimmed)
    name = os.path.splitext(file_name)[0]

    if name.upper() in ILLEGAL_NAMES:
        print("%s: illegal %s name %s" % (path, kind, name.upper()))

    if file_name[-1] in ILLEGAL_END_CHARS:
        print("%s: illegal %s name end-char '%s'" %
              (path, kind, file_name[-1]))

    encoded_name = to_bytes(file_name, encoding='utf-8')
    for char in ILLEGAL_CHARS:
        if char in encoded_name:
            encoded_path = to_bytes(path, encoding='utf-8')
            print("%s: illegal char '%s' in %s name" %
                  (encoded_path, char, kind))
Example #7
0
def check_path(path, dir=False):
    """Return a list of error strings describing illegal names/characters in *path*.

    NOTE(review): ``dir`` shadows the builtin of the same name; left unchanged
    to keep the call signature stable.
    """
    problems = []
    type_name = 'directory' if dir else 'file'
    file_name = os.path.split(path)[1]
    name = os.path.splitext(file_name)[0]

    if name.upper() in ILLEGAL_NAMES:
        problems.append("Illegal %s name %s: %s" %
                        (type_name, name.upper(), path))

    if file_name[-1] in ILLEGAL_END_CHARS:
        problems.append("Illegal %s name end-char '%s': %s" %
                        (type_name, file_name[-1], path))

    encoded_name = to_bytes(file_name, encoding='utf-8')
    for char in ILLEGAL_CHARS:
        if char in encoded_name:
            encoded_path = to_bytes(path, encoding='utf-8')
            problems.append("Illegal char %s in %s name: %s" %
                            (char, type_name, encoded_path))
    return problems
Example #8
0
    def login(self, username, password):
        """Authenticate against the SEPM identity endpoint and cache the bearer token.

        Raises AnsibleAuthenticationFailure when the response carries no token.
        """
        credentials = {"username": username, "password": password}
        response, response_data = self.send_request(
            "POST",
            "/sepm/api/v1/identity/authenticate",
            data=to_bytes(json.dumps(credentials)))

        try:
            token = response_data["token"]
        except KeyError:
            raise AnsibleAuthenticationFailure(
                message="Failed to acquire login token.")
        self.connection._auth = {
            "Authorization": "Bearer {0}".format(token)
        }
    def send_request(
        self,
        request_method,
        url,
        params=None,
        data=None,
        headers=None,
        query_string_auth=False,
    ):
        """Send a JSON request to the API and return ``(status_code, body)``.

        :param request_method: HTTP verb, e.g. "GET" or "POST".
        :param url: request path; query parameters from *params* are appended.
        :param params: optional dict of query parameters; entries whose value
            is None are dropped before encoding.
        :param data: optional payload, serialized with ``json.dumps``.
        :param headers: optional header dict; defaults to ``BASE_HEADERS``.
        :param query_string_auth: when True, authenticate via an ``sID`` query
            parameter instead of the session cookie (required by some
            Trend Micro endpoints).
        :returns: tuple of (HTTP status code, decoded JSON body); on HTTPError
            the decoded error body is returned instead.
        """
        # Copy the caller's dict: the previous version injected "sID" into
        # the caller-supplied params, mutating it as a side effect.
        params = dict(params) if params else {}
        headers = headers if headers else BASE_HEADERS
        data = data if data else {}

        # Some Trend Micro API Endpoints require the sID in the query string
        # instead of honoring the session Cookie
        if query_string_auth:
            self.connection._connect()
            params["sID"] = self._auth_token

        if params:
            # Drop parameters whose value is None before URL-encoding.
            params_with_val = {
                name: value for name, value in params.items()
                if value is not None
            }
            url = "{0}?{1}".format(url, urlencode(params_with_val))
        try:
            self._display_request(request_method)
            response, response_data = self.connection.send(
                url,
                to_bytes(json.dumps(data)),
                method=request_method,
                headers=headers,
            )
            value = self._get_response_value(response_data)

        except HTTPError as e:
            # Surface the server's error payload instead of raising.
            error = json.loads(e.read())
            return e.code, error
        return response.getcode(), self._response_to_json(value)
    def install(self):
        """Install or update the Jenkins plugin named in ``self.params``.

        Three paths: (1) not installed and version is None/'latest' -- deploy
        via Jenkins' own update center (Groovy script); (2) direct download
        when caching is disabled, a version is pinned, or no local copy
        exists, comparing MD5 checksums; (3) cached 'latest' -- compare the
        local SHA-1 against the update-center metadata.  Finally file
        attributes are enforced.  Honors check mode throughout.

        :returns: True when anything changed, False otherwise.
        """
        changed = False
        # Jenkins stores plugins as <home>/plugins/<name>.jpi
        plugin_file = (
            '%s/plugins/%s.jpi' % (
                self.params['jenkins_home'],
                self.params['name']))

        if not self.is_installed and self.params['version'] in [None, 'latest']:
            if not self.module.check_mode:
                # Install the plugin (with dependencies)
                install_script = (
                    'd = Jenkins.instance.updateCenter.getPlugin("%s")'
                    '.deploy(); d.get();' % self.params['name'])

                if self.params['with_dependencies']:
                    # Deploy needed dependencies first, then the plugin itself.
                    install_script = (
                        'Jenkins.instance.updateCenter.getPlugin("%s")'
                        '.getNeededDependencies().each{it.deploy()}; %s' % (
                            self.params['name'], install_script))

                script_data = {
                    'script': install_script
                }
                data = urlencode(script_data)

                # Send the installation request
                r = self._get_url_data(
                    "%s/scriptText" % self.url,
                    msg_status="Cannot install plugin.",
                    msg_exception="Plugin installation has failed.",
                    data=data)

                # Remove a stale .hpi copy of the same plugin, if present.
                hpi_file = '%s/plugins/%s.hpi' % (
                    self.params['jenkins_home'],
                    self.params['name'])

                if os.path.isfile(hpi_file):
                    os.remove(hpi_file)

            changed = True
        else:
            # Check if the plugin directory exists
            if not os.path.isdir(self.params['jenkins_home']):
                self.module.fail_json(
                    msg="Jenkins home directory doesn't exist.")

            # MD5 here is a change-detection checksum only, not a security
            # measure.
            md5sum_old = None
            if os.path.isfile(plugin_file):
                # Make the checksum of the currently installed plugin
                with open(plugin_file, 'rb') as md5_plugin_fh:
                    md5_plugin_content = md5_plugin_fh.read()
                md5sum_old = hashlib.md5(md5_plugin_content).hexdigest()

            if self.params['version'] in [None, 'latest']:
                # Take latest version
                plugin_url = (
                    "%s/latest/%s.hpi" % (
                        self.params['updates_url'],
                        self.params['name']))
            else:
                # Take specific version
                plugin_url = (
                    "{0}/download/plugins/"
                    "{1}/{2}/{1}.hpi".format(
                        self.params['updates_url'],
                        self.params['name'],
                        self.params['version']))

            # Download directly when the updates cache is disabled, a specific
            # version is pinned, or there is no local plugin to compare with.
            # NOTE(review): with version=None (not 'latest'), caching enabled
            # and an existing local plugin, neither this branch nor the elif
            # below runs -- confirm that no-op is intended.
            if (
                    self.params['updates_expiration'] == 0 or
                    self.params['version'] not in [None, 'latest'] or
                    md5sum_old is None):

                # Download the plugin file directly
                r = self._download_plugin(plugin_url)

                # Write downloaded plugin into file if checksums don't match
                if md5sum_old is None:
                    # No previously installed plugin
                    if not self.module.check_mode:
                        self._write_file(plugin_file, r)

                    changed = True
                else:
                    # Get data for the MD5
                    data = r.read()

                    # Make new checksum
                    md5sum_new = hashlib.md5(data).hexdigest()

                    # If the checksum is different from the currently installed
                    # plugin, store the new plugin
                    if md5sum_old != md5sum_new:
                        if not self.module.check_mode:
                            self._write_file(plugin_file, data)

                        changed = True
            elif self.params['version'] == 'latest':
                # Check for update from the updates JSON file
                plugin_data = self._download_updates()

                try:
                    with open(plugin_file, 'rb') as sha1_plugin_fh:
                        sha1_plugin_content = sha1_plugin_fh.read()
                    sha1_old = hashlib.sha1(sha1_plugin_content)
                except Exception as e:
                    self.module.fail_json(
                        msg="Cannot calculate SHA1 of the old plugin.",
                        details=to_native(e))

                # The update data appears to carry the SHA-1 base64-encoded,
                # so encode ours the same way for comparison -- confirm.
                sha1sum_old = base64.b64encode(sha1_old.digest())

                # If the latest version changed, download it
                if sha1sum_old != to_bytes(plugin_data['sha1']):
                    if not self.module.check_mode:
                        r = self._download_plugin(plugin_url)
                        self._write_file(plugin_file, r)

                    changed = True

        # Change file attributes if needed
        if os.path.isfile(plugin_file):
            params = {
                'dest': plugin_file
            }
            params.update(self.params)
            file_args = self.module.load_file_common_arguments(params)

            if not self.module.check_mode:
                # Not sure how to run this in the check mode
                changed = self.module.set_fs_attributes_if_different(
                    file_args, changed)
            else:
                # See the comment above
                changed = True

        return changed
Example #11
0
    def setup_host(self):
        """Create or update a DHCP host entry via OMAPI.

        Looks the host up by MAC; when absent, sends an OMAPI create message
        with hardware address/type, name, optional ip-address and dhcpd
        statements.  When present, sends an update for any changed
        ip-address.  Exits the module with changed/lease facts on success and
        calls ``fail_json`` on validation or OMAPI errors.
        """
        if self.module.params['hostname'] is None or len(self.module.params['hostname']) == 0:
            self.module.fail_json(msg="name attribute could not be empty when adding or modifying host.")

        msg = None
        host_response = self.get_host(self.module.params['macaddr'])
        # If host was not found using macaddr, add create message
        if host_response is None:
            msg = OmapiMessage.open(to_bytes('host', errors='surrogate_or_strict'))
            msg.message.append(('create', struct.pack('!I', 1)))
            msg.message.append(('exclusive', struct.pack('!I', 1)))
            # NOTE(review): these keys are plain str while get_host() wraps
            # its keys with to_bytes() -- confirm the OMAPI library accepts both.
            msg.obj.append(('hardware-address', pack_mac(self.module.params['macaddr'])))
            msg.obj.append(('hardware-type', struct.pack('!I', 1)))
            msg.obj.append(('name', self.module.params['hostname']))
            if self.module.params['ip'] is not None:
                msg.obj.append((to_bytes("ip-address", errors='surrogate_or_strict'), pack_ip(self.module.params['ip'])))

            # Build the dhcpd "statements" clause: optional ddns-hostname plus
            # any user-supplied statements, each terminated with "; ".
            stmt_join = ""
            if self.module.params['ddns']:
                stmt_join += 'ddns-hostname "{0}"; '.format(self.module.params['hostname'])

            try:
                if len(self.module.params['statements']) > 0:
                    stmt_join += "; ".join(self.module.params['statements'])
                    stmt_join += "; "
            except TypeError:
                # 'statements' was not a sized sequence.
                e = get_exception()
                self.module.fail_json(msg="Invalid statements found: %s" % e)

            if len(stmt_join) > 0:
                msg.obj.append(('statements', stmt_join))

            try:
                response = self.omapi.query_server(msg)
                if response.opcode != OMAPI_OP_UPDATE:
                    self.module.fail_json(msg="Failed to add host, ensure authentication and host parameters "
                                              "are valid.")
                self.module.exit_json(changed=True, lease=self.unpack_facts(response.obj))
            except OmapiError:
                e = get_exception()
                self.module.fail_json(msg="OMAPI error: %s" % e)
        # Forge update message
        else:
            response_obj = self.unpack_facts(host_response.obj)
            fields_to_update = {}

            # NOTE(review): when params['ip'] is None and the record lacks an
            # ip-address, pack_ip(None) is still invoked here -- confirm
            # pack_ip tolerates None or that callers prevent this case.
            if to_bytes('ip-address', errors='surrogate_or_strict') not in response_obj or \
                            unpack_ip(response_obj[to_bytes('ip-address', errors='surrogate_or_strict')]) != self.module.params['ip']:
                fields_to_update['ip-address'] = pack_ip(self.module.params['ip'])

            # Name cannot be changed
            # NOTE(review): if 'name' is absent from response_obj the error
            # message below raises KeyError on response_obj['name'] -- verify.
            if 'name' not in response_obj or response_obj['name'] != self.module.params['hostname']:
                self.module.fail_json(msg="Changing hostname is not supported. Old was %s, new is %s. "
                                          "Please delete host and add new." %
                                          (response_obj['name'], self.module.params['hostname']))

            """
            # It seems statements are not returned by OMAPI, then we cannot modify them at this moment.
            if 'statements' not in response_obj and len(self.module.params['statements']) > 0 or \
                response_obj['statements'] != self.module.params['statements']:
                with open('/tmp/omapi', 'w') as fb:
                    for (k,v) in iteritems(response_obj):
                        fb.writelines('statements: %s %s\n' % (k, v))
            """
            if len(fields_to_update) == 0:
                # Nothing differs: report the existing lease unchanged.
                self.module.exit_json(changed=False, lease=response_obj)
            else:
                msg = OmapiMessage.update(host_response.handle)
                msg.update_object(fields_to_update)

            try:
                response = self.omapi.query_server(msg)
                if response.opcode != OMAPI_OP_STATUS:
                    self.module.fail_json(msg="Failed to modify host, ensure authentication and host parameters "
                                              "are valid.")
                self.module.exit_json(changed=True)
            except OmapiError:
                e = get_exception()
                self.module.fail_json(msg="OMAPI error: %s" % e)
Example #12
0
    def exec_module(self, **kwargs):

        nsg = None

        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        if self.module._name == 'azure_rm_virtualmachine_scaleset':
            self.module.deprecate(
                "The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'",
                version='2.12')

        # make sure options are lower case
        self.remove_on_absent = set(
            [resource.lower() for resource in self.remove_on_absent])

        # convert elements to ints
        self.zones = [int(i) for i in self.zones] if self.zones else None

        # default virtual_network_resource_group to resource_group
        if not self.virtual_network_resource_group:
            self.virtual_network_resource_group = self.resource_group

        changed = False
        results = dict()
        vmss = None
        disable_ssh_password = None
        vmss_dict = None
        virtual_network = None
        subnet = None
        image_reference = None
        custom_image = False
        load_balancer_backend_address_pools = None
        load_balancer_inbound_nat_pools = None
        load_balancer = None
        support_lb_change = True

        resource_group = self.get_resource_group(self.resource_group)
        if not self.location:
            # Set default location
            self.location = resource_group.location

        if self.custom_data:
            self.custom_data = to_native(
                base64.b64encode(to_bytes(self.custom_data)))

        if self.state == 'present':
            # Verify parameters and resolve any defaults

            if self.vm_size and not self.vm_size_is_valid():
                self.fail(
                    "Parameter error: vm_size {0} is not valid for your subscription and location."
                    .format(self.vm_size))

            # if self.virtual_network_name:
            #     virtual_network = self.get_virtual_network(self.virtual_network_name)

            if self.ssh_public_keys:
                msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
                    "each dict contains keys: path, key_data."
                for key in self.ssh_public_keys:
                    if not isinstance(key, dict):
                        self.fail(msg)
                    if not key.get('path') or not key.get('key_data'):
                        self.fail(msg)

            if self.image and isinstance(self.image, dict):
                if all(key in self.image
                       for key in ('publisher', 'offer', 'sku', 'version')):
                    marketplace_image = self.get_marketplace_image_version()
                    if self.image['version'] == 'latest':
                        self.image['version'] = marketplace_image.name
                        self.log("Using image version {0}".format(
                            self.image['version']))

                    image_reference = self.compute_models.ImageReference(
                        publisher=self.image['publisher'],
                        offer=self.image['offer'],
                        sku=self.image['sku'],
                        version=self.image['version'])
                elif self.image.get('name'):
                    custom_image = True
                    image_reference = self.get_custom_image_reference(
                        self.image.get('name'),
                        self.image.get('resource_group'))
                else:
                    self.fail(
                        "parameter error: expecting image to contain [publisher, offer, sku, version] or [name, resource_group]"
                    )
            elif self.image and isinstance(self.image, str):
                custom_image = True
                image_reference = self.get_custom_image_reference(self.image)
            elif self.image:
                self.fail(
                    "parameter error: expecting image to be a string or dict not {0}"
                    .format(type(self.image).__name__))

            disable_ssh_password = not self.ssh_password_enabled

            if self.load_balancer:
                load_balancer = self.get_load_balancer(self.load_balancer)
                load_balancer_backend_address_pools = ([
                    self.compute_models.SubResource(id=resource.id)
                    for resource in load_balancer.backend_address_pools
                ] if load_balancer.backend_address_pools else None)
                load_balancer_inbound_nat_pools = ([
                    self.compute_models.SubResource(id=resource.id)
                    for resource in load_balancer.inbound_nat_pools
                ] if load_balancer.inbound_nat_pools else None)

        try:
            self.log("Fetching virtual machine scale set {0}".format(
                self.name))
            vmss = self.compute_client.virtual_machine_scale_sets.get(
                self.resource_group, self.name)
            self.check_provisioning_state(vmss, self.state)
            vmss_dict = self.serialize_vmss(vmss)

            if self.state == 'present':
                differences = []
                results = vmss_dict

                if self.os_disk_caching and \
                   self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']:
                    self.log(
                        'CHANGED: virtual machine scale set {0} - OS disk caching'
                        .format(self.name))
                    differences.append('OS Disk caching')
                    changed = True
                    vmss_dict['properties']['virtualMachineProfile'][
                        'storageProfile']['osDisk'][
                            'caching'] = self.os_disk_caching

                if self.capacity and \
                   self.capacity != vmss_dict['sku']['capacity']:
                    self.log(
                        'CHANGED: virtual machine scale set {0} - Capacity'.
                        format(self.name))
                    differences.append('Capacity')
                    changed = True
                    vmss_dict['sku']['capacity'] = self.capacity

                if self.data_disks and \
                   len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])):
                    self.log(
                        'CHANGED: virtual machine scale set {0} - Data Disks'.
                        format(self.name))
                    differences.append('Data Disks')
                    changed = True

                if self.upgrade_policy and \
                   self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']:
                    self.log(
                        'CHANGED: virtual machine scale set {0} - Upgrade Policy'
                        .format(self.name))
                    differences.append('Upgrade Policy')
                    changed = True
                    vmss_dict['properties']['upgradePolicy'][
                        'mode'] = self.upgrade_policy

                if image_reference and \
                   image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']:
                    self.log('CHANGED: virtual machine scale set {0} - Image'.
                             format(self.name))
                    differences.append('Image')
                    changed = True
                    vmss_dict['properties']['virtualMachineProfile'][
                        'storageProfile'][
                            'imageReference'] = image_reference.as_dict()

                update_tags, vmss_dict['tags'] = self.update_tags(
                    vmss_dict.get('tags', dict()))
                if update_tags:
                    differences.append('Tags')
                    changed = True

                if bool(self.overprovision) != bool(
                        vmss_dict['properties']['overprovision']):
                    differences.append('overprovision')
                    changed = True

                vmss_dict['zones'] = [
                    int(i) for i in vmss_dict['zones']
                ] if 'zones' in vmss_dict and vmss_dict['zones'] else None
                if self.zones != vmss_dict['zones']:
                    self.log(
                        "CHANGED: virtual machine scale sets {0} zones".format(
                            self.name))
                    differences.append('Zones')
                    changed = True
                    vmss_dict['zones'] = self.zones

                nicConfigs = vmss_dict['properties']['virtualMachineProfile'][
                    'networkProfile']['networkInterfaceConfigurations']
                backend_address_pool = nicConfigs[0]['properties'][
                    'ipConfigurations'][0]['properties'].get(
                        'loadBalancerBackendAddressPools', [])
                if (len(nicConfigs) != 1 or len(backend_address_pool) != 1):
                    support_lb_change = False  # Currently not support for the vmss contains more than one loadbalancer
                    self.module.warn(
                        'Updating more than one load balancer on VMSS is currently not supported'
                    )
                else:
                    load_balancer_id = "{0}/".format(
                        load_balancer.id) if load_balancer else None
                    backend_address_pool_id = backend_address_pool[0].get('id')
                    if bool(load_balancer_id) != bool(
                            backend_address_pool_id
                    ) or not backend_address_pool_id.startswith(
                            load_balancer_id):
                        differences.append('load_balancer')
                        changed = True

                if self.custom_data:
                    if self.custom_data != vmss_dict['properties'][
                            'virtualMachineProfile']['osProfile'].get(
                                'customData'):
                        differences.append('custom_data')
                        changed = True
                        vmss_dict['properties']['virtualMachineProfile'][
                            'osProfile']['customData'] = self.custom_data

                self.differences = differences

            elif self.state == 'absent':
                self.log(
                    "CHANGED: virtual machine scale set {0} exists and requested state is 'absent'"
                    .format(self.name))
                results = dict()
                changed = True

        except CloudError:
            self.log('Virtual machine scale set {0} does not exist'.format(
                self.name))
            if self.state == 'present':
                self.log(
                    "CHANGED: virtual machine scale set {0} does not exist but state is 'present'."
                    .format(self.name))
                changed = True

        self.results['changed'] = changed
        self.results['ansible_facts']['azure_vmss'] = results

        if self.check_mode:
            return self.results

        if changed:
            if self.state == 'present':
                if not vmss:
                    # Create the VMSS
                    self.log("Create virtual machine scale set {0}".format(
                        self.name))
                    self.results['actions'].append('Created VMSS {0}'.format(
                        self.name))

                    # Validate parameters
                    if not self.admin_username:
                        self.fail(
                            "Parameter error: admin_username required when creating a virtual machine scale set."
                        )

                    if self.os_type == 'Linux':
                        if disable_ssh_password and not self.ssh_public_keys:
                            self.fail(
                                "Parameter error: ssh_public_keys required when disabling SSH password."
                            )

                    if not self.virtual_network_name:
                        default_vnet = self.create_default_vnet()
                        virtual_network = default_vnet.id
                        self.virtual_network_name = default_vnet.name

                    if self.subnet_name:
                        subnet = self.get_subnet(self.virtual_network_name,
                                                 self.subnet_name)

                    if not self.short_hostname:
                        self.short_hostname = self.name

                    if not image_reference:
                        self.fail(
                            "Parameter error: an image is required when creating a virtual machine."
                        )

                    managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
                        storage_account_type=self.managed_disk_type)

                    if self.security_group:
                        nsg = self.parse_nsg()
                        if nsg:
                            self.security_group = self.network_models.NetworkSecurityGroup(
                                id=nsg.get('id'))

                    vmss_resource = self.compute_models.VirtualMachineScaleSet(
                        location=self.location,
                        overprovision=self.overprovision,
                        tags=self.tags,
                        upgrade_policy=self.compute_models.UpgradePolicy(
                            mode=self.upgrade_policy),
                        sku=self.compute_models.Sku(
                            name=self.vm_size,
                            capacity=self.capacity,
                            tier=self.tier,
                        ),
                        virtual_machine_profile=self.compute_models.
                        VirtualMachineScaleSetVMProfile(
                            os_profile=self.compute_models.
                            VirtualMachineScaleSetOSProfile(
                                admin_username=self.admin_username,
                                computer_name_prefix=self.short_hostname,
                                custom_data=self.custom_data),
                            storage_profile=self.compute_models.
                            VirtualMachineScaleSetStorageProfile(
                                os_disk=self.compute_models.
                                VirtualMachineScaleSetOSDisk(
                                    managed_disk=managed_disk,
                                    create_option=self.compute_models.
                                    DiskCreateOptionTypes.from_image,
                                    caching=self.os_disk_caching,
                                ),
                                image_reference=image_reference,
                            ),
                            network_profile=self.compute_models.
                            VirtualMachineScaleSetNetworkProfile(
                                network_interface_configurations=[
                                    self.compute_models.
                                    VirtualMachineScaleSetNetworkConfiguration(
                                        name=self.name,
                                        primary=True,
                                        ip_configurations=[
                                            self.compute_models.
                                            VirtualMachineScaleSetIPConfiguration(
                                                name='default',
                                                subnet=self.compute_models.
                                                ApiEntityReference(
                                                    id=subnet.id),
                                                primary=True,
                                                load_balancer_backend_address_pools
                                                =load_balancer_backend_address_pools,
                                                load_balancer_inbound_nat_pools=
                                                load_balancer_inbound_nat_pools
                                            )
                                        ],
                                        enable_accelerated_networking=self.
                                        enable_accelerated_networking,
                                        network_security_group=self.
                                        security_group)
                                ])),
                        zones=self.zones)

                    if self.admin_password:
                        vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password

                    if self.os_type == 'Linux':
                        vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
                            disable_password_authentication=disable_ssh_password
                        )

                    if self.ssh_public_keys:
                        ssh_config = self.compute_models.SshConfiguration()
                        ssh_config.public_keys = \
                            [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
                        vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config

                    if self.data_disks:
                        data_disks = []

                        for data_disk in self.data_disks:
                            data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
                                storage_account_type=data_disk.get(
                                    'managed_disk_type', None))

                            data_disk['caching'] = data_disk.get(
                                'caching',
                                self.compute_models.CachingTypes.read_only)

                            data_disks.append(
                                self.compute_models.
                                VirtualMachineScaleSetDataDisk(
                                    lun=data_disk.get('lun', None),
                                    caching=data_disk.get('caching', None),
                                    create_option=self.compute_models.
                                    DiskCreateOptionTypes.empty,
                                    disk_size_gb=data_disk.get(
                                        'disk_size_gb', None),
                                    managed_disk=data_disk_managed_disk,
                                ))

                        vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks

                    self.log("Create virtual machine with parameters:")
                    self.create_or_update_vmss(vmss_resource)

                elif self.differences and len(self.differences) > 0:
                    self.log("Update virtual machine scale set {0}".format(
                        self.name))
                    self.results['actions'].append('Updated VMSS {0}'.format(
                        self.name))

                    vmss_resource = self.get_vmss()
                    vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching
                    vmss_resource.sku.capacity = self.capacity
                    vmss_resource.overprovision = self.overprovision

                    if support_lb_change:
                        vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                            .ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools
                        vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
                            .ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools

                    if self.data_disks is not None:
                        data_disks = []
                        for data_disk in self.data_disks:
                            data_disks.append(
                                self.compute_models.
                                VirtualMachineScaleSetDataDisk(
                                    lun=data_disk['lun'],
                                    caching=data_disk['caching'],
                                    create_option=self.compute_models.
                                    DiskCreateOptionTypes.empty,
                                    disk_size_gb=data_disk['disk_size_gb'],
                                    managed_disk=self.compute_models.
                                    VirtualMachineScaleSetManagedDiskParameters(
                                        storage_account_type=data_disk[
                                            'managed_disk_type']),
                                ))
                        vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks

                    if image_reference is not None:
                        vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference
                    self.log("Update virtual machine with parameters:")
                    self.create_or_update_vmss(vmss_resource)

                self.results['ansible_facts'][
                    'azure_vmss'] = self.serialize_vmss(self.get_vmss())

            elif self.state == 'absent':
                # delete the VM
                self.log("Delete virtual machine scale set {0}".format(
                    self.name))
                self.results['ansible_facts']['azure_vmss'] = None
                self.delete_vmss(vmss)

        # until we sort out how we want to do this globally
        del self.results['actions']

        return self.results
    def install(self):
        """Install or update the Jenkins plugin on disk.

        Strategy:
          1. If the plugin is not installed and no specific version was
             requested, try the Jenkins plugin-manager API first.
          2. Otherwise (or on API failure), download the .jpi directly and
             write it only when its SHA-1 differs from the installed copy.
          3. Finally, apply Ansible file attributes (owner/mode/...) to the
             plugin file.

        :return: True if anything changed (plugin content or file attrs).
        """
        changed = False
        plugin_file = (
            '%s/plugins/%s.jpi' % (
                self.params['jenkins_home'],
                self.params['name']))

        if not self.is_installed and self.params['version'] in [None, 'latest']:
            try:
                self._install_with_plugin_manager()
                changed = True
            except FailedInstallingWithPluginManager:  # Fallback to manually downloading the plugin
                pass

        if not changed:
            # Check if the plugin directory exists
            if not os.path.isdir(self.params['jenkins_home']):
                self.module.fail_json(
                    msg="Jenkins home directory doesn't exist.")

            checksum_old = None
            if os.path.isfile(plugin_file):
                # Make the checksum of the currently installed plugin
                # (hexdigest() returns a text string)
                with open(plugin_file, 'rb') as plugin_fh:
                    plugin_content = plugin_fh.read()
                checksum_old = hashlib.sha1(plugin_content).hexdigest()

            if self.params['version'] in [None, 'latest']:
                # Take latest version
                plugin_urls = self._get_latest_plugin_urls()
            else:
                # Take specific version
                plugin_urls = self._get_versioned_plugin_urls()
            if (
                    self.params['updates_expiration'] == 0 or
                    self.params['version'] not in [None, 'latest'] or
                    checksum_old is None):

                # Download the plugin file directly
                r = self._download_plugin(plugin_urls)

                # Write downloaded plugin into file if checksums don't match
                if checksum_old is None:
                    # No previously installed plugin
                    if not self.module.check_mode:
                        self._write_file(plugin_file, r)

                    changed = True
                else:
                    # Get data for the SHA-1 checksum
                    data = r.read()

                    # Make new checksum
                    checksum_new = hashlib.sha1(data).hexdigest()

                    # If the checksum is different from the currently installed
                    # plugin, store the new plugin
                    if checksum_old != checksum_new:
                        if not self.module.check_mode:
                            self._write_file(plugin_file, data)

                        changed = True
            elif self.params['version'] == 'latest':
                # Check for update from the updates JSON file
                plugin_data = self._download_updates()

                # If the latest version changed, download it.
                # BUG FIX: checksum_old is a text hexdigest while
                # to_bytes(...) yields bytes; on Python 3 str != bytes is
                # always True, so the plugin was re-downloaded on every run.
                # Normalize both sides to bytes before comparing.
                # NOTE(review): this assumes the updates JSON 'sha1' field is
                # hex-encoded like hexdigest() — confirm against the feed.
                if to_bytes(checksum_old) != to_bytes(plugin_data['sha1']):
                    if not self.module.check_mode:
                        r = self._download_plugin(plugin_urls)
                        self._write_file(plugin_file, r)

                    changed = True

        # Change file attributes (owner, group, mode, ...) if needed
        if os.path.isfile(plugin_file):
            params = {
                'dest': plugin_file
            }
            params.update(self.params)
            file_args = self.module.load_file_common_arguments(params)

            if not self.module.check_mode:
                # Not sure how to run this in the check mode
                changed = self.module.set_fs_attributes_if_different(
                    file_args, changed)
            else:
                # See the comment above
                changed = True

        return changed
Beispiel #14
0
def set_module_args(mod_args):
    """Serialize *mod_args* into the byte payload AnsibleModule parses.

    Stores the JSON-encoded ``ANSIBLE_MODULE_ARGS`` wrapper on
    ``basic._ANSIBLE_ARGS`` so a subsequently constructed module sees
    these arguments.
    """
    payload = {'ANSIBLE_MODULE_ARGS': mod_args}
    basic._ANSIBLE_ARGS = to_bytes(json.dumps(payload))