def query(action=None, command=None, args=None, method="GET", data=None): """ Make a web call to a Parallels provider """ path = config.get_config_value("url", get_configured_provider(), __opts__, search_global=False) auth_handler = urllib2.HTTPBasicAuthHandler() auth_handler.add_password( realm="Parallels Instance Manager", uri=path, user=config.get_config_value("user", get_configured_provider(), __opts__, search_global=False), passwd=config.get_config_value("password", get_configured_provider(), __opts__, search_global=False), ) opener = urllib2.build_opener(auth_handler) urllib2.install_opener(opener) if action: path += action if command: path += "/{0}".format(command) if type(args) is not dict: args = {} kwargs = {"data": data} if type(data) is str and "<?xml" in data: kwargs["headers"] = {"Content-type": "application/xml"} if args: path += "?%s" params = urllib.urlencode(args) req = urllib2.Request(url=path % params, **kwargs) else: req = urllib2.Request(url=path, **kwargs) req.get_method = lambda: method log.debug("{0} {1}".format(method, req.get_full_url())) if data: log.debug(data) try: result = urllib2.urlopen(req) log.debug("PARALLELS Response Status Code: {0}".format(result.getcode())) if "content-length" in result.headers: content = result.read() result.close() items = ET.fromstring(content) return items return {} except urllib2.URLError as exc: log.error("PARALLELS Response Status Code: {0} {1}".format(exc.code, exc.msg)) root = ET.fromstring(exc.read()) log.error(root) return {"error": root}
def _get_error(error): # Converts boto exception to string that can be used to output error. error = '\n'.join(error.split('\n')[1:]) error = ET.fromstring(error) code = error[0][1].text message = error[0][2].text return code, message
def list_employees(order_by='id'): ''' Show all employees for this company. CLI Example: salt myminion bamboohr.list_employees By default, the return data will be keyed by ID. However, it can be ordered by any other field. Keep in mind that if the field that is chosen contains duplicate values (i.e., location is used, for a company which only has one location), then each duplicate value will be overwritten by the previous. Therefore, it is advisable to only sort by fields that are guaranteed to be unique. CLI Examples: salt myminion bamboohr.list_employees order_by=id salt myminion bamboohr.list_employees order_by=displayName salt myminion bamboohr.list_employees order_by=workEmail ''' ret = {} status, result = _query(action='employees', command='directory') root = ET.fromstring(result) directory = root.getchildren() for cat in directory: if cat.tag != 'employees': continue for item in cat: emp_id = item.items()[0][1] emp_ret = {'id': emp_id} for details in item.getchildren(): emp_ret[details.items()[0][1]] = details.text ret[emp_ret[order_by]] = emp_ret return ret
def list_users(order_by='id'): ''' Show all users for this company. CLI Example: salt myminion bamboohr.list_users By default, the return data will be keyed by ID. However, it can be ordered by any other field. Keep in mind that if the field that is chosen contains duplicate values (i.e., location is used, for a company which only has one location), then each duplicate value will be overwritten by the previous. Therefore, it is advisable to only sort by fields that are guaranteed to be unique. CLI Examples: salt myminion bamboohr.list_users order_by=id salt myminion bamboohr.list_users order_by=email ''' ret = {} status, result = _query(action='meta', command='users') root = ET.fromstring(result) users = root.getchildren() for user in users: user_id = None user_ret = {} for item in user.items(): user_ret[item[0]] = item[1] if item[0] == 'id': user_id = item[1] for item in user.getchildren(): user_ret[item.tag] = item.text ret[user_ret[order_by]] = user_ret return ret
def test_gen_vol_xml_for_esxi(self): xml_data = virt._gen_vol_xml('vmname', 'system', 8192, 'esxi') root = ET.fromstring(xml_data) self.assertEqual(root.find('name').text, 'vmname/system.vmdk') self.assertEqual(root.find('key').text, 'vmname/system') self.assertEqual(root.find('capacity').attrib['unit'], 'KiB') self.assertEqual(root.find('capacity').text, str(8192 * 1024))
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers): metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers) root = ET.fromstring(metadata_xml) assert group_id == root.find('groupId').text assert artifact_id == root.find('artifactId').text latest_version = root.find('versioning').find('latest').text return { 'latest_version': latest_version }
def mksls(src, dst=None): ''' Convert an AutoYAST file to an SLS file ''' with salt.utils.fopen(src, 'r') as fh_: ps_opts = xml.to_dict(ET.fromstring(fh_.read())) if dst is not None: with salt.utils.fopen(dst, 'w') as fh_: fh_.write(yaml.safe_dump(ps_opts, default_flow_style=False)) else: return yaml.safe_dump(ps_opts, default_flow_style=False)
def test_boot_default_dev(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm' ) root = ET.fromstring(xml_data) self.assertEqual(root.find('os/boot').attrib['dev'], 'hd')
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers): metadata_xml = _get_artifact_metadata_xml( artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, ) root = ET.fromstring(metadata_xml) assert group_id == root.find("groupId").text assert artifact_id == root.find("artifactId").text latest_version = root.find("versioning").find("latest").text return {"latest_version": latest_version}
def test_controller_for_kvm(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm' ) root = ET.fromstring(xml_data) controllers = root.findall('.//devices/controller') # There should be no controller self.assertTrue(len(controllers) == 0)
def test_boot_multiple_devs(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', boot_dev='cdrom network' ) root = ET.fromstring(xml_data) devs = root.findall('.//boot') self.assertTrue(len(devs) == 2)
def test_gen_xml_for_serial_console(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', serial_type='pty', console=True ) root = ET.fromstring(xml_data) self.assertEqual(root.find('devices/serial').attrib['type'], 'pty') self.assertEqual(root.find('devices/console').attrib['type'], 'pty')
def test_controller_for_esxi(self): diskp = virt._disk_profile('default', 'esxi') nicp = virt._nic_profile('default', 'esxi') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'esxi' ) root = ET.fromstring(xml_data) controllers = root.findall('.//devices/controller') self.assertTrue(len(controllers) == 1) controller = controllers[0] self.assertEqual(controller.attrib['model'], 'lsilogic')
def test_gen_xml_for_telnet_console_unspecified_port(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', serial_type='tcp', console=True ) root = ET.fromstring(xml_data) self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp') self.assertEqual(root.find('devices/console').attrib['type'], 'tcp') self.assertIsInstance(int(root.find('devices/console/source').attrib['service']), int)
def _make_mock_device(xml_def): mocked_conn = virt.libvirt.openAuth.return_value if not isinstance(mocked_conn.nodeDeviceLookupByName, MappedResultMock): mocked_conn.nodeDeviceLookupByName = MappedResultMock() doc = ET.fromstring(xml_def) name = doc.find("./name").text mocked_conn.nodeDeviceLookupByName.add(name) mocked_device = mocked_conn.nodeDeviceLookupByName(name) mocked_device.name.return_value = name mocked_device.XMLDesc.return_value = xml_def mocked_device.listCaps.return_value = [ cap.get("type") for cap in doc.findall("./capability") ] return mocked_device
def test_controller_for_kvm(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm' ) root = ET.fromstring(xml_data) controllers = root.findall('.//devices/controller') # There should be no controller self.assertTrue(len(controllers) == 0) # kvm mac address shoud start with 52:54:00 self.assertTrue("mac address='52:54:00" in xml_data)
def get_config(instance=_DEFAULT_INSTANCE): ''' Determine the configuration of the provided instance. :param str instance: The name of the Tentacle instance. :return: A dictionary containing the configuration data. :rtype: dict CLI Example: .. code-block:: bash salt '*' octopus_tentacle.get_config instance='Tentacle' ''' ret = dict() name_mapping = {'Octopus.Home': 'home_path', 'Octopus.Communications.Squid': 'squid', 'Tentacle.CertificateThumbprint': 'thumbprint', 'Tentacle.Communication.TrustedOctopusServers': 'servers', 'Tentacle.Deployment.ApplicationDirectory': 'app_path', 'Tentacle.Services.NoListen': 'comms', 'Tentacle.Services.PortNumber': 'port'} config_path = get_config_path(instance) if not os.path.isfile(config_path): _LOG.error('Unable to get configuration file for instance: %s', instance) return ret with salt.utils.fopen(config_path, 'r') as fh_: config = _parse_config(ElementTree.fromstring(fh_.read())) for item in config: # Skip keys that we aren't specifically looking for. if item in name_mapping: # Convert the NoListen value to a friendly value. if name_mapping[item] == 'comms': for comms_style in _COMMS_STYLES: if config[item] == _COMMS_STYLES[comms_style]: ret[name_mapping[item]] = comms_style break else: ret[name_mapping[item]] = config[item] return ret
def test_gen_xml_for_telnet_console(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml('hello', 1, 512, diskp, nicp, 'kvm', serial_type='tcp', console=True, telnet_port=22223) root = ET.fromstring(xml_data) self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp') self.assertEqual(root.find('devices/console').attrib['type'], 'tcp') self.assertEqual( root.find('devices/console/source').attrib['service'], '22223')
def test_gen_xml_for_kvm_custom_profile(self): diskp_yaml = ''' - first: size: 8192 format: qcow2 model: virtio pool: /var/lib/images - second: size: 4096 format: qcow2 # FIX remove line, currently test fails model: virtio # FIX remove line, currently test fails pool: /var/lib/images ''' nicp_yaml = ''' - type: bridge name: eth1 source: b2 model: virtio mac: '00:00:00:00:00:00' - name: eth2 type: bridge source: b2 model: virtio mac: '00:00:00:00:00:00' ''' with patch('salt.modules.virt._nic_profile') as nic_profile, \ patch('salt.modules.virt._disk_profile') as disk_profile: disk_profile.return_value = salt.utils.yaml.safe_load(diskp_yaml) nic_profile.return_value = salt.utils.yaml.safe_load(nicp_yaml) diskp = virt._disk_profile('noeffect', 'kvm') nicp = virt._nic_profile('noeffect', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'kvm') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') self.assertTrue(len(root.findall('.//disk')) == 2) self.assertTrue(len(root.findall('.//interface')) == 2)
def test_gen_xml_for_esxi_custom_profile(self): diskp_yaml = ''' - first: size: 8192 format: vmdk model: scsi pool: datastore1 - second: size: 4096 format: vmdk # FIX remove line, currently test fails model: scsi # FIX remove line, currently test fails pool: datastore2 ''' nicp_yaml = ''' - type: bridge name: eth1 source: ONENET model: e1000 mac: '00:00:00:00:00:00' - name: eth2 type: bridge source: TWONET model: e1000 mac: '00:00:00:00:00:00' ''' with patch('salt.modules.virt._nic_profile') as nic_profile, \ patch('salt.modules.virt._disk_profile') as disk_profile: disk_profile.return_value = yaml.load(diskp_yaml) nic_profile.return_value = yaml.load(nicp_yaml) diskp = virt._disk_profile('noeffect', 'esxi') nicp = virt._nic_profile('noeffect', 'esxi') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'esxi', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'vmware') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') self.assertTrue(len(root.findall('.//disk')) == 2) self.assertTrue(len(root.findall('.//interface')) == 2)
def __execute_cmd(name, xml): """ Execute ilom commands """ ret = {name.replace("_", " "): {}} id_num = 0 tmp_dir = os.path.join(__opts__["cachedir"], "tmp") if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) with tempfile.NamedTemporaryFile( dir=tmp_dir, prefix=name + six.text_type(os.getpid()), suffix=".xml", mode="w", delete=False, ) as fh: tmpfilename = fh.name fh.write(xml) cmd = __salt__["cmd.run_all"]("hponcfg -f {0}".format(tmpfilename)) # Clean up the temp file __salt__["file.remove"](tmpfilename) if cmd["retcode"] != 0: for i in cmd["stderr"].splitlines(): if i.startswith(" MESSAGE="): return {"Failed": i.split("=")[-1]} return False try: for i in ET.fromstring("".join(cmd["stdout"].splitlines()[3:-1])): # Make sure dict keys don't collide if ret[name.replace("_", " ")].get(i.tag, False): ret[name.replace("_", " ")].update( {i.tag + "_" + six.text_type(id_num): i.attrib} ) id_num += 1 else: ret[name.replace("_", " ")].update({i.tag: i.attrib}) except SyntaxError: return True return ret
def _get_artifact_metadata(nexus_url, repository, group_id, artifact_id, headers): metadata_xml = _get_artifact_metadata_xml(nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers) root = ET.fromstring(metadata_xml) assert group_id == root.find('groupId').text assert artifact_id == root.find('artifactId').text versions = root.find('versioning').find('versions') versionList = [] for version in versions.iter('version'): versionList.append(version.text) latest_version = max(versionList) log.debug('latest version=%s', latest_version) return {'latest_version': latest_version}
def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifact_id, version, headers): metadata_xml = _get_snapshot_version_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers) metadata = ET.fromstring(metadata_xml) assert group_id == metadata.find('groupId').text assert artifact_id == metadata.find('artifactId').text assert version == metadata.find('version').text snapshot_versions = metadata.find('versioning').find('snapshotVersions') extension_version_dict = {} for snapshot_version in snapshot_versions: extension = snapshot_version.find('extension').text value = snapshot_version.find('value').text extension_version_dict[extension] = value return { 'snapshot_versions': extension_version_dict }
def test_gen_hypervisor_features(): """ Test the virt._gen_xml hypervisor_features handling """ xml_data = virt._gen_xml( virt.libvirt.openAuth.return_value, "hello", 1, 512, {}, {}, "kvm", "hvm", "x86_64", hypervisor_features={"kvm-hint-dedicated": True}, ) root = ET.fromstring(xml_data) assert "on" == root.find("features/kvm/hint-dedicated").attrib["state"]
def xml_doc(): return ET.fromstring(""" <domain> <name>test01</name> <memory unit="MiB">1024</memory> <cpu> <topology sockets="1"/> </cpu> <vcpus> <vcpu enabled="yes" id="1"/> </vcpus> <memtune> <hugepages> <page size="128"/> </hugepages> </memtune> </domain> """)
def test_init_hostdev_usb(make_capabilities, make_mock_device): """ Test virt.init with USB host device passed through """ make_capabilities() make_mock_device(""" <device> <name>usb_3_1_3</name> <path>/sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3</path> <devnode type='dev'>/dev/bus/usb/003/004</devnode> <parent>usb_3_1</parent> <driver> <name>usb</name> </driver> <capability type='usb_device'> <bus>3</bus> <device>4</device> <product id='0x6006'>AUKEY PC-LM1E Camera</product> <vendor id='0x0458'>KYE Systems Corp. (Mouse Systems)</vendor> </capability> </device> """) with patch.dict(virt.os.__dict__, { "chmod": MagicMock(), "makedirs": MagicMock() }): with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}): virt.init("test_vm", 2, 2048, host_devices=["usb_3_1_3"], start=False) define_mock = virt.libvirt.openAuth().defineXML setxml = ET.fromstring(define_mock.call_args[0][0]) expected_xml = strip_xml(""" <hostdev mode='subsystem' type='usb'> <source> <vendor id='0x0458'/> <product id='0x6006'/> </source> </hostdev> """) assert expected_xml == strip_xml( ET.tostring(setxml.find("./devices/hostdev")))
def __execute_cmd(name, xml): ''' Execute ilom commands ''' ret = {name.replace('_', ' '): {}} id_num = 0 tmp_dir = os.path.join(__opts__['cachedir'], 'tmp') if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) with tempfile.NamedTemporaryFile(dir=tmp_dir, prefix=name+str(os.getpid()), suffix='.xml', delete=False) as fh: tmpfilename = fh.name fh.write(xml) cmd = __salt__['cmd.run_all']('hponcfg -f {0}'.format(tmpfilename)) # Clean up the temp file __salt__['file.remove'](tmpfilename) if cmd['retcode'] != 0: for i in cmd['stderr'].splitlines(): if i.startswith(' MESSAGE='): return {'Failed': i.split('=')[-1]} return False try: for i in ET.fromstring(''.join(cmd['stdout'].splitlines()[3:-1])): # Make sure dict keys don't collide if ret[name.replace('_', ' ')].get(i.tag, False): ret[name.replace('_', ' ')].update( {i.tag + '_' + str(id_num): i.attrib} ) id_num += 1 else: ret[name.replace('_', ' ')].update( {i.tag: i.attrib} ) except SyntaxError: return True return ret
def test_gen_xml_for_kvm_default_profile(self): ''' Test virt._gen_xml(), KVM default profile case ''' diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'kvm') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, six.text_type(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') disks = root.findall('.//disk') self.assertEqual(len(disks), 1) disk = disks[0] root_dir = salt.config.DEFAULT_MINION_OPTS.get('root_dir') self.assertTrue( disk.find('source').attrib['file'].startswith(root_dir)) self.assertTrue( os.path.join('hello', 'system') in disk.find( 'source').attrib['file']) self.assertEqual(disk.find('target').attrib['dev'], 'vda') self.assertEqual(disk.find('target').attrib['bus'], 'virtio') self.assertEqual(disk.find('driver').attrib['name'], 'qemu') self.assertEqual(disk.find('driver').attrib['type'], 'qcow2') interfaces = root.findall('.//interface') self.assertEqual(len(interfaces), 1) iface = interfaces[0] self.assertEqual(iface.attrib['type'], 'bridge') self.assertEqual(iface.find('source').attrib['bridge'], 'br0') self.assertEqual(iface.find('model').attrib['type'], 'virtio') mac = iface.find('mac').attrib['address'] self.assertTrue( re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def test_gen_xml_for_esxi_custom_profile(self, disk_profile, nic_profile): diskp_yaml = ''' - first: size: 8192 format: vmdk model: scsi pool: datastore1 - second: size: 4096 format: vmdk # FIX remove line, currently test fails model: scsi # FIX remove line, currently test fails pool: datastore2 ''' nicp_yaml = ''' - type: bridge name: eth1 source: ONENET model: e1000 mac: '00:00:00:00:00:00' - name: eth2 type: bridge source: TWONET model: e1000 mac: '00:00:00:00:00:00' ''' disk_profile.return_value = yaml.load(diskp_yaml) nic_profile.return_value = yaml.load(nicp_yaml) diskp = virt._disk_profile('noeffect', 'esxi') nicp = virt._nic_profile('noeffect', 'esxi') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'esxi', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'vmware') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') self.assertTrue(len(root.findall('.//disk')) == 2) self.assertTrue(len(root.findall('.//interface')) == 2)
def test_gen_xml_for_kvm_custom_profile(self, disk_profile, nic_profile): diskp_yaml = ''' - first: size: 8192 format: qcow2 model: virtio pool: /var/lib/images - second: size: 4096 format: qcow2 # FIX remove line, currently test fails model: virtio # FIX remove line, currently test fails pool: /var/lib/images ''' nicp_yaml = ''' - type: bridge name: eth1 source: b2 model: virtio mac: '00:00:00:00:00:00' - name: eth2 type: bridge source: b2 model: virtio mac: '00:00:00:00:00:00' ''' disk_profile.return_value = yaml.load(diskp_yaml) nic_profile.return_value = yaml.load(nicp_yaml) diskp = virt._disk_profile('noeffect', 'kvm') nicp = virt._nic_profile('noeffect', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'kvm') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') self.assertTrue(len(root.findall('.//disk')) == 2) self.assertTrue(len(root.findall('.//interface')) == 2)
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False): metadata_xml = _get_artifact_metadata_xml( artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id) root = ET.fromstring(metadata_xml) assert group_id == root.find('groupId').text assert artifact_id == root.find('artifactId').text latest_version = root.find('versioning').find('latest').text return {'latest_version': latest_version}
def _get_artifact_metadata(nexus_url, repository, group_id, artifact_id, headers): metadata_xml = _get_artifact_metadata_xml( nexus_url=nexus_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, ) root = ET.fromstring(metadata_xml) assert group_id == root.find("groupId").text assert artifact_id == root.find("artifactId").text versions = root.find("versioning").find("versions") versionList = [] for version in versions.iter("version"): versionList.append(version.text) latest_version = max(versionList) log.debug("latest version=%s", latest_version) return {"latest_version": latest_version}
def test_gen_xml_for_telnet_no_console(self): ''' Test virt_gen_xml() with no telnet console ''' diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', serial_type='tcp', console=False, ) root = ET.fromstring(xml_data) self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp') self.assertEqual(root.find('devices/console'), None)
def test_gen_xml_for_telnet_console_unspecified_port(self): ''' Test virt_gen_xml() telnet console without any specified port ''' diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml('hello', 1, 512, diskp, nicp, 'kvm', serial_type='tcp', console=True) root = ET.fromstring(xml_data) self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp') self.assertEqual(root.find('devices/console').attrib['type'], 'tcp') self.assertIsInstance( int(root.find('devices/console/source').attrib['service']), int)
def test_init_hostdev_pci(make_capabilities, make_mock_device): """ Test virt.init with PCI host device passed through """ make_capabilities() make_mock_device(""" <device> <name>pci_1002_71c4</name> <parent>pci_8086_27a1</parent> <capability type='pci'> <class>0xffffff</class> <domain>0</domain> <bus>1</bus> <slot>0</slot> <function>0</function> <product id='0x71c4'>M56GL [Mobility FireGL V5200]</product> <vendor id='0x1002'>ATI Technologies Inc</vendor> <numa node='1'/> </capability> </device> """) with patch.dict(virt.os.__dict__, { "chmod": MagicMock(), "makedirs": MagicMock() }): with patch.dict(virt.__salt__, {"cmd.run": MagicMock()}): virt.init("test_vm", 2, 2048, host_devices=["pci_1002_71c4"], start=False) define_mock = virt.libvirt.openAuth().defineXML setxml = ET.fromstring(define_mock.call_args[0][0]) expected_xml = strip_xml(""" <hostdev mode='subsystem' type='pci' managed='yes'> <source> <address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/> </source> </hostdev> """) assert expected_xml == strip_xml( ET.tostring(setxml.find("./devices/hostdev")))
def test_prepare_xml(): hostname = "salt" service = "salt-minion" opts = { "hostname": hostname, "service": service, "checktype": "active", } xml_ret = nagios_nrdp_return._prepare_xml(options=opts) root = ET.fromstring(xml_ret) checkresult = root.find("checkresult") hostname_res = checkresult.find("hostname").text servicename_res = checkresult.find("servicename").text # Verify the regular XML format. assert servicename_res == service assert hostname_res == hostname
def list_meta_fields(): """ Show all meta data fields for this company. CLI Example: salt myminion bamboohr.list_meta_fields """ ret = {} status, result = _query(action="meta", command="fields") root = ET.fromstring(result) for field in root: field_id = None field_ret = {"name": field.text} for item in field.items(): field_ret[item[0]] = item[1] if item[0] == "id": field_id = item[1] ret[field_id] = field_ret return ret
def test_gen_xml_for_esxi_default_profile(self): ''' Test virt._gen_xml(), ESXi default profile case ''' diskp = virt._disk_profile('default', 'esxi') nicp = virt._nic_profile('default', 'esxi') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'esxi', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'vmware') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, six.text_type(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') disks = root.findall('.//disk') self.assertEqual(len(disks), 1) disk = disks[0] self.assertTrue('[0]' in disk.find('source').attrib['file']) self.assertTrue( os.path.join('hello', 'system') in disk.find( 'source').attrib['file']) self.assertEqual(disk.find('target').attrib['dev'], 'sda') self.assertEqual(disk.find('target').attrib['bus'], 'scsi') self.assertEqual(disk.find('address').attrib['unit'], '0') interfaces = root.findall('.//interface') self.assertEqual(len(interfaces), 1) iface = interfaces[0] self.assertEqual(iface.attrib['type'], 'bridge') self.assertEqual(iface.find('source').attrib['bridge'], 'DEFAULT') self.assertEqual(iface.find('model').attrib['type'], 'e1000') mac = iface.find('mac').attrib['address'] self.assertTrue( re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def _make_mock_vm(xml_def, running=False, inactive_def=None): mocked_conn = virt.libvirt.openAuth.return_value doc = ET.fromstring(xml_def) name = doc.find("name").text os_type = "hvm" os_type_node = doc.find("os/type") if os_type_node is not None: os_type = os_type_node.text mocked_conn.listDefinedDomains.return_value = [name] # Configure the mocked domain if not isinstance(mocked_conn.lookupByName, MappedResultMock): mocked_conn.lookupByName = MappedResultMock() mocked_conn.lookupByName.add(name) domain_mock = mocked_conn.lookupByName(name) domain_mock.XMLDesc = MappedResultMock() domain_mock.XMLDesc.add(0, xml_def) domain_mock.XMLDesc.add(virt.libvirt.VIR_DOMAIN_XML_INACTIVE, inactive_def or xml_def) domain_mock.OSType.return_value = os_type # Return state as shutdown domain_mock.info.return_value = [ 0 if running else 4, 2048 * 1024, 1024 * 1024, 2, 1234, ] domain_mock.ID.return_value = 1 domain_mock.name.return_value = name domain_mock.attachDevice.return_value = 0 domain_mock.detachDevice.return_value = 0 domain_mock.connect.return_value = mocked_conn return domain_mock
def __init__(self, boto_exception): self.status = boto_exception.status self.reason = boto_exception.reason try: body = boto_exception.body or '' error = ET.fromstring(body).find('Errors').find('Error') self.error = {'code': error.find('Code').text, 'message': error.find('Message').text} except (AttributeError, ET.ParseError): self.error = None status = self.status or '' reason = self.reason or '' error = self.error or {} if error: message = '{0} {1}: {2}'.format(status, reason, error.get('message')) else: message = '{0} {1}'.format(status, reason) super(BotoExecutionError, self).__init__(message)
def list_meta_fields(): ''' Show all meta data fields for this company. CLI Example: salt myminion bamboohr.list_meta_fields ''' ret = {} status, result = _query(action='meta', command='fields') root = ET.fromstring(result) fields = root.getchildren() for field in fields: field_id = None field_ret = {'name': field.text} for item in field.items(): field_ret[item[0]] = item[1] if item[0] == 'id': field_id = item[1] ret[field_id] = field_ret return ret
def test_update_stop_on_reboot(make_mock_vm): """ Test virt.update to add the on_reboot=destroy flag """ xml_def = """ <domain type='kvm'> <name>my_vm</name> <memory unit='KiB'>524288</memory> <currentMemory unit='KiB'>524288</currentMemory> <vcpu placement='static'>1</vcpu> <os> <type arch='x86_64'>hvm</type> </os> </domain>""" domain_mock = make_mock_vm(xml_def) ret = virt.update("my_vm", stop_on_reboot=True) assert ret["definition"] define_mock = virt.libvirt.openAuth().defineXML setxml = ET.fromstring(define_mock.call_args[0][0]) assert "destroy" == setxml.find("./on_reboot").text
def _get_artifact_metadata( artifactory_url, repository, group_id, artifact_id, headers, use_literal_group_id=False, ): metadata_xml = _get_artifact_metadata_xml( artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers, use_literal_group_id=use_literal_group_id, ) root = ET.fromstring(metadata_xml) assert group_id == root.find("groupId").text assert artifact_id == root.find("artifactId").text latest_version = root.find("versioning").find("latest").text return {"latest_version": latest_version}
def logon(): ''' Logs into the cimc device and returns the session cookie. ''' content = {} payload = "<aaaLogin inName='{0}' inPassword='******'></aaaLogin>".format(DETAILS['username'], DETAILS['password']) r = __utils__['http.query'](DETAILS['url'], data=payload, method='POST', decode_type='plain', decode=True, verify_ssl=False, raise_error=False, headers=DETAILS['headers']) answer = re.findall(r'(<[\s\S.]*>)', r['text'])[0] items = ET.fromstring(answer) for item in items.attrib: content[item] = items.attrib[item] if 'outCookie' not in content: raise salt.exceptions.CommandExecutionError("Unable to log into proxy device.") return content['outCookie']
def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifact_id, version, headers): metadata_xml = _get_snapshot_version_metadata_xml( artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers, ) metadata = ET.fromstring(metadata_xml) assert group_id == metadata.find("groupId").text assert artifact_id == metadata.find("artifactId").text assert version == metadata.find("version").text snapshot_versions = metadata.find("versioning").find("snapshotVersions") extension_version_dict = {} for snapshot_version in snapshot_versions: extension = snapshot_version.find("extension").text value = snapshot_version.find("value").text extension_version_dict[extension] = value return {"snapshot_versions": extension_version_dict}
def _make_mock_net(xml_def): mocked_conn = virt.libvirt.openAuth.return_value doc = ET.fromstring(xml_def) name = doc.find("name").text if not isinstance(mocked_conn.networkLookupByName, MappedResultMock): mocked_conn.networkLookupByName = MappedResultMock() mocked_conn.networkLookupByName.add(name) net_mock = mocked_conn.networkLookupByName(name) net_mock.XMLDesc.return_value = xml_def # libvirt defaults the autostart to unset net_mock.autostart.return_value = 0 # Append the network to listAllNetworks return value all_nets = mocked_conn.listAllNetworks.return_value if not isinstance(all_nets, list): all_nets = [] all_nets.append(net_mock) mocked_conn.listAllNetworks.return_value = all_nets return net_mock
def test_gen_xml_for_kvm_default_profile(self): diskp = virt._disk_profile('default', 'kvm') nicp = virt._nic_profile('default', 'kvm') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'kvm', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'kvm') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') disks = root.findall('.//disk') self.assertEqual(len(disks), 1) disk = disks[0] self.assertTrue(disk.find('source').attrib['file'].startswith('/')) self.assertTrue('hello/system' in disk.find('source').attrib['file']) self.assertEqual(disk.find('target').attrib['dev'], 'vda') self.assertEqual(disk.find('target').attrib['bus'], 'virtio') self.assertEqual(disk.find('driver').attrib['name'], 'qemu') self.assertEqual(disk.find('driver').attrib['type'], 'qcow2') interfaces = root.findall('.//interface') self.assertEqual(len(interfaces), 1) iface = interfaces[0] self.assertEqual(iface.attrib['type'], 'bridge') self.assertEqual(iface.find('source').attrib['bridge'], 'br0') self.assertEqual(iface.find('model').attrib['type'], 'virtio') mac = iface.find('mac').attrib['address'] self.assertTrue( re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def test_gen_xml_for_esxi_default_profile(self): diskp = virt._disk_profile('default', 'esxi') nicp = virt._nic_profile('default', 'esxi') xml_data = virt._gen_xml( 'hello', 1, 512, diskp, nicp, 'esxi', ) root = ET.fromstring(xml_data) self.assertEqual(root.attrib['type'], 'vmware') self.assertEqual(root.find('vcpu').text, '1') self.assertEqual(root.find('memory').text, str(512 * 1024)) self.assertEqual(root.find('memory').attrib['unit'], 'KiB') disks = root.findall('.//disk') self.assertEqual(len(disks), 1) disk = disks[0] self.assertTrue('[0]' in disk.find('source').attrib['file']) self.assertTrue('hello/system' in disk.find('source').attrib['file']) self.assertEqual(disk.find('target').attrib['dev'], 'sda') self.assertEqual(disk.find('target').attrib['bus'], 'scsi') self.assertEqual(disk.find('address').attrib['unit'], '0') interfaces = root.findall('.//interface') self.assertEqual(len(interfaces), 1) iface = interfaces[0] self.assertEqual(iface.attrib['type'], 'bridge') self.assertEqual(iface.find('source').attrib['bridge'], 'DEFAULT') self.assertEqual(iface.find('model').attrib['type'], 'e1000') mac = iface.find('mac').attrib['address'] self.assertTrue( re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
def _make_mock_vm(xml_def): mocked_conn = virt.libvirt.openAuth.return_value doc = ET.fromstring(xml_def) name = doc.find("name").text os_type = "hvm" os_type_node = doc.find("os/type") if os_type_node is not None: os_type = os_type_node.text mocked_conn.listDefinedDomains.return_value = [name] # Configure the mocked domain domain_mock = virt.libvirt.virDomain() if not isinstance(mocked_conn.lookupByName, MappedResultMock): mocked_conn.lookupByName = MappedResultMock() mocked_conn.lookupByName.add(name) domain_mock = mocked_conn.lookupByName(name) domain_mock.XMLDesc.return_value = xml_def domain_mock.OSType.return_value = os_type # Return state as shutdown domain_mock.info.return_value = [ 4, 2048 * 1024, 1024 * 1024, 2, 1234, ] domain_mock.ID.return_value = 1 domain_mock.name.return_value = name domain_mock.attachDevice.return_value = 0 domain_mock.detachDevice.return_value = 0 return domain_mock
def query(url, method='GET', params=None, data=None, data_file=None, header_dict=None, header_list=None, header_file=None, username=None, password=None, auth=None, decode=False, decode_type='auto', status=False, headers=False, text=False, cookies=None, cookie_jar=None, cookie_format='lwp', persist_session=False, session_cookie_jar=None, data_render=False, data_renderer=None, header_render=False, header_renderer=None, template_dict=None, test=False, test_url=None, node='minion', port=80, opts=None, backend='tornado', requests_lib=None, ca_bundle=None, verify_ssl=None, cert=None, text_out=None, headers_out=None, decode_out=None, stream=False, streaming_callback=None, handle=False, agent=USERAGENT, hide_fields=None, **kwargs): ''' Query a resource, and decode the return data ''' ret = {} if opts is None: if node == 'master': opts = salt.config.master_config( os.path.join(syspaths.CONFIG_DIR, 'master') ) elif node == 'minion': opts = salt.config.minion_config( os.path.join(syspaths.CONFIG_DIR, 'minion') ) else: opts = {} if requests_lib is None: requests_lib = opts.get('requests_lib', False) if requests_lib is True: log.warn('Please set "backend" to "requests" instead of setting ' '"requests_lib" to "True"') if HAS_REQUESTS is False: ret['error'] = ('http.query has been set to use requests, but the ' 'requests library does not seem to be installed') log.error(ret['error']) return ret backend = 'requests' else: requests_log = logging.getLogger('requests') requests_log.setLevel(logging.WARNING) # Some libraries don't support separation of url and GET parameters # Don't need a try/except block, since Salt depends on tornado url_full = tornado.httputil.url_concat(url, params) if ca_bundle is None: ca_bundle = get_ca_bundle(opts) if verify_ssl is None: verify_ssl = opts.get('verify_ssl', True) if cert is None: cert = opts.get('cert', None) if data_file is not None: data = _render( data_file, data_render, data_renderer, template_dict, opts ) # Make sure no secret fields show up in logs log_url = sanitize_url(url_full, hide_fields) log.debug('Requesting URL {0} using {1} method'.format(log_url, method)) if method == 'POST': # Make sure no secret fields show up in logs if isinstance(data, dict): log_data = data.copy() if isinstance(hide_fields, list): for item in data: for field in hide_fields: if item == field: log_data[item] = 'XXXXXXXXXX' log.trace('Request POST Data: {0}'.format(pprint.pformat(log_data))) else: log.trace('Request POST Data: {0}'.format(pprint.pformat(data))) if header_file is not None: header_tpl = _render( header_file, header_render, header_renderer, template_dict, opts ) if isinstance(header_tpl, dict): header_dict = header_tpl else: header_list = header_tpl.splitlines() if header_dict is None: header_dict = {} if header_list is None: header_list = [] if cookie_jar is None: cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.txt') if session_cookie_jar is None: session_cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.session.p') if persist_session is True and HAS_MSGPACK: # TODO: This is hackish; it will overwrite the session cookie jar with # all cookies from this one connection, rather than behaving like a # proper cookie jar. Unfortunately, since session cookies do not # contain expirations, they can't be stored in a proper cookie jar. if os.path.isfile(session_cookie_jar): with salt.utils.fopen(session_cookie_jar, 'rb') as fh_: session_cookies = msgpack.load(fh_) if isinstance(session_cookies, dict): header_dict.update(session_cookies) else: with salt.utils.fopen(session_cookie_jar, 'wb') as fh_: msgpack.dump('', fh_) for header in header_list: comps = header.split(':') if len(comps) < 2: continue header_dict[comps[0].strip()] = comps[1].strip() if not auth: if username and password: auth = (username, password) if agent == USERAGENT: agent = '{0} http.query()'.format(agent) header_dict['User-agent'] = agent if backend == 'requests': sess = requests.Session() sess.auth = auth sess.headers.update(header_dict) log.trace('Request Headers: {0}'.format(sess.headers)) sess_cookies = sess.cookies sess.verify = verify_ssl elif backend == 'urllib2': sess_cookies = None else: # Tornado sess_cookies = None if cookies is not None: if cookie_format == 'mozilla': sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar) else: sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar) if not os.path.isfile(cookie_jar): sess_cookies.save() sess_cookies.load() if test is True: if test_url is None: return {} else: url = test_url ret['test'] = True if backend == 'requests': req_kwargs = {} if stream is True: if requests.__version__[0] == '0': # 'stream' was called 'prefetch' before 1.0, with flipped meaning req_kwargs['prefetch'] = False else: req_kwargs['stream'] = True # Client-side cert handling if cert is not None: if isinstance(cert, six.string_types): if os.path.exists(cert): req_kwargs['cert'] = cert elif isinstance(cert, tuple): if os.path.exists(cert[0]) and os.path.exists(cert[1]): req_kwargs['cert'] = cert else: log.error('The client-side certificate path that was passed is ' 'not valid: {0}'.format(cert)) result = sess.request( method, url, params=params, data=data, **req_kwargs ) result.raise_for_status() if stream is True or handle is True: return { 'handle': result, 'body': result.content, } log.debug('Final URL location of Response: {0}'.format(sanitize_url(result.url, hide_fields))) result_status_code = result.status_code result_headers = result.headers result_text = result.content result_cookies = result.cookies ret['body'] = result.content elif backend == 'urllib2': request = urllib_request.Request(url_full, data) handlers = [ urllib_request.HTTPHandler, urllib_request.HTTPCookieProcessor(sess_cookies) ] if url.startswith('https'): hostname = request.get_host() handlers[0] = urllib_request.HTTPSHandler(1) if not HAS_MATCHHOSTNAME: log.warn(('match_hostname() not available, SSL hostname checking ' 'not available. THIS CONNECTION MAY NOT BE SECURE!')) elif verify_ssl is False: log.warn(('SSL certificate verification has been explicitly ' 'disabled. THIS CONNECTION MAY NOT BE SECURE!')) else: if ':' in hostname: hostname, port = hostname.split(':') else: port = 443 sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((hostname, int(port))) sockwrap = ssl.wrap_socket( sock, ca_certs=ca_bundle, cert_reqs=ssl.CERT_REQUIRED ) try: match_hostname(sockwrap.getpeercert(), hostname) except CertificateError as exc: ret['error'] = ( 'The certificate was invalid. ' 'Error returned was: {0}'.format( pprint.pformat(exc) ) ) return ret # Client-side cert handling if cert is not None: cert_chain = None if isinstance(cert, six.string_types): if os.path.exists(cert): cert_chain = (cert) elif isinstance(cert, tuple): if os.path.exists(cert[0]) and os.path.exists(cert[1]): cert_chain = cert else: log.error('The client-side certificate path that was ' 'passed is not valid: {0}'.format(cert)) return if hasattr(ssl, 'SSLContext'): # Python >= 2.7.9 context = ssl.SSLContext.load_cert_chain(*cert_chain) handlers.append(urllib_request.HTTPSHandler(context=context)) # pylint: disable=E1123 else: # Python < 2.7.9 cert_kwargs = { 'host': request.get_host(), 'port': port, 'cert_file': cert_chain[0] } if len(cert_chain) > 1: cert_kwargs['key_file'] = cert_chain[1] handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs) opener = urllib_request.build_opener(*handlers) for header in header_dict: request.add_header(header, header_dict[header]) request.get_method = lambda: method try: result = opener.open(request) except URLError as exc: return {'Error': str(exc)} if stream is True or handle is True: return { 'handle': result, 'body': result.content, } result_status_code = result.code result_headers = result.headers.headers result_text = result.read() ret['body'] = result_text else: # Tornado req_kwargs = {} # Client-side cert handling if cert is not None: if isinstance(cert, six.string_types): if os.path.exists(cert): req_kwargs['client_cert'] = cert elif isinstance(cert, tuple): if os.path.exists(cert[0]) and os.path.exists(cert[1]): req_kwargs['client_cert'] = cert[0] req_kwargs['client_key'] = cert[1] else: log.error('The client-side certificate path that was passed is ' 'not valid: {0}'.format(cert)) if isinstance(data, dict): data = urllib.urlencode(data) if verify_ssl: req_kwargs['ca_certs'] = ca_bundle max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body']) timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout']) client_argspec = None proxy_host = opts.get('proxy_host', None) proxy_port = opts.get('proxy_port', None) proxy_username = opts.get('proxy_username', None) proxy_password = opts.get('proxy_password', None) # We want to use curl_http if we have a proxy defined if proxy_host and proxy_port: if HAS_CURL_HTTPCLIENT is False: ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl, but the ' 'pycurl library does not seem to be installed') log.error(ret['error']) return ret tornado.httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient') client_argspec = inspect.getargspec(tornado.curl_httpclient.CurlAsyncHTTPClient.initialize) else: client_argspec = inspect.getargspec(tornado.simple_httpclient.SimpleAsyncHTTPClient.initialize) supports_max_body_size = 'max_body_size' in client_argspec.args try: if supports_max_body_size: result = HTTPClient(max_body_size=max_body).fetch( url_full, method=method, headers=header_dict, auth_username=username, auth_password=password, body=data, validate_cert=verify_ssl, allow_nonstandard_methods=True, streaming_callback=streaming_callback, request_timeout=timeout, proxy_host=proxy_host, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=proxy_password, **req_kwargs ) else: result = HTTPClient().fetch( url_full, method=method, headers=header_dict, auth_username=username, auth_password=password, body=data, validate_cert=verify_ssl, allow_nonstandard_methods=True, streaming_callback=streaming_callback, request_timeout=timeout, proxy_host=proxy_host, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=proxy_password, **req_kwargs ) except tornado.httpclient.HTTPError as exc: ret['status'] = exc.code ret['error'] = str(exc) return ret if stream is True or handle is True: return { 'handle': result, 'body': result.body, } result_status_code = result.code result_headers = result.headers result_text = result.body ret['body'] = result.body if 'Set-Cookie' in result_headers.keys() and cookies is not None: result_cookies = parse_cookie_header(result_headers['Set-Cookie']) for item in result_cookies: sess_cookies.set_cookie(item) else: result_cookies = None if isinstance(result_headers, list): result_headers_dict = {} for header in result_headers: comps = header.split(':') result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip() result_headers = result_headers_dict log.debug('Response Status Code: {0}'.format(result_status_code)) log.trace('Response Headers: {0}'.format(result_headers)) log.trace('Response Cookies: {0}'.format(sess_cookies)) try: log.trace('Response Text: {0}'.format(result_text)) except UnicodeEncodeError as exc: log.trace(('Cannot Trace Log Response Text: {0}. This may be due to ' 'incompatibilities between requests and logging.').format(exc)) if text_out is not None and os.path.exists(text_out): with salt.utils.fopen(text_out, 'w') as tof: tof.write(result_text) if headers_out is not None and os.path.exists(headers_out): with salt.utils.fopen(headers_out, 'w') as hof: hof.write(result_headers) if cookies is not None: sess_cookies.save() if persist_session is True and HAS_MSGPACK: # TODO: See persist_session above if 'set-cookie' in result_headers: with salt.utils.fopen(session_cookie_jar, 'wb') as fh_: session_cookies = result_headers.get('set-cookie', None) if session_cookies is not None: msgpack.dump({'Cookie': session_cookies}, fh_) else: msgpack.dump('', fh_) if status is True: ret['status'] = result_status_code if headers is True: ret['headers'] = result_headers if decode is True: if decode_type == 'auto': content_type = result_headers.get( 'content-type', 'application/json' ) if 'xml' in content_type: decode_type = 'xml' elif 'json' in content_type: decode_type = 'json' elif 'yaml' in content_type: decode_type = 'yaml' else: decode_type = 'plain' valid_decodes = ('json', 'xml', 'yaml', 'plain') if decode_type not in valid_decodes: ret['error'] = ( 'Invalid decode_type specified. ' 'Valid decode types are: {0}'.format( pprint.pformat(valid_decodes) ) ) log.error(ret['error']) return ret if decode_type == 'json': ret['dict'] = json.loads(salt.utils.to_str(result_text)) elif decode_type == 'xml': ret['dict'] = [] items = ET.fromstring(result_text) for item in items: ret['dict'].append(xml.to_dict(item)) elif decode_type == 'yaml': ret['dict'] = yaml.safe_load(result_text) else: text = True if decode_out and os.path.exists(decode_out): with salt.utils.fopen(decode_out, 'w') as dof: dof.write(result_text) if text is True: ret['text'] = result_text return ret
def query(action=None, command=None, args=None, method='GET', data=None): ''' Make a web call to a Parallels provider ''' path = config.get_cloud_config_value( 'url', get_configured_provider(), __opts__, search_global=False ) auth_handler = _HTTPBasicAuthHandler() auth_handler.add_password( realm='Parallels Instance Manager', uri=path, user=config.get_cloud_config_value( 'user', get_configured_provider(), __opts__, search_global=False ), passwd=config.get_cloud_config_value( 'password', get_configured_provider(), __opts__, search_global=False ) ) opener = _build_opener(auth_handler) _install_opener(opener) if action: path += action if command: path += '/{0}'.format(command) if not type(args, dict): args = {} kwargs = {'data': data} if isinstance(data, str) and '<?xml' in data: kwargs['headers'] = { 'Content-type': 'application/xml', } if args: params = _urlencode(args) req = _Request(url='{0}?{1}'.format(path, params), **kwargs) else: req = _Request(url=path, **kwargs) req.get_method = lambda: method log.debug('{0} {1}'.format(method, req.get_full_url())) if data: log.debug(data) try: result = _urlopen(req) log.debug( 'PARALLELS Response Status Code: {0}'.format( result.getcode() ) ) if 'content-length' in result.headers: content = result.read() result.close() items = ET.fromstring(content) return items return {} except URLError as exc: log.error( 'PARALLELS Response Status Code: {0} {1}'.format( exc.code, exc.msg ) ) root = ET.fromstring(exc.read()) log.error(root) return {'error': root}
def show_employee(emp_id, fields=None): ''' Show all employees for this company. CLI Example: salt myminion bamboohr.show_employee 1138 By default, the fields normally returned from bamboohr.list_employees are returned. These fields are: - canUploadPhoto - department - displayName - firstName - id - jobTitle - lastName - location - mobilePhone - nickname - photoUploaded - photoUrl - workEmail - workPhone - workPhoneExtension If needed, a different set of fields may be specified, separated by commas: CLI Example: salt myminion bamboohr.show_employee 1138 displayName,dateOfBirth A list of available fields can be found at http://www.bamboohr.com/api/documentation/employees.php ''' ret = {} if fields is None: fields = ','.join(( 'canUploadPhoto', 'department', 'displayName', 'firstName', 'id', 'jobTitle', 'lastName', 'location', 'mobilePhone', 'nickname', 'photoUploaded', 'photoUrl', 'workEmail', 'workPhone', 'workPhoneExtension', )) status, result = _query( action='employees', command=emp_id, args={'fields': fields} ) root = ET.fromstring(result) items = root.getchildren() ret = {'id': emp_id} for item in items: ret[item.items()[0][1]] = item.text return ret
def query(params=None, setname=None, requesturl=None, location=None, return_url=False, return_root=False, opts=None, provider=None, endpoint=None, product='ec2', sigver='2'): ''' Perform a query against AWS services using Signature Version 2 Signing Process. This is documented at: http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html Regions and endpoints are documented at: http://docs.aws.amazon.com/general/latest/gr/rande.html Default ``product`` is ``ec2``. Valid ``product`` names are: .. code-block: yaml - autoscaling (Auto Scaling) - cloudformation (CloudFormation) - ec2 (Elastic Compute Cloud) - elasticache (ElastiCache) - elasticbeanstalk (Elastic BeanStalk) - elasticloadbalancing (Elastic Load Balancing) - elasticmapreduce (Elastic MapReduce) - iam (Identity and Access Management) - importexport (Import/Export) - monitoring (CloudWatch) - rds (Relational Database Service) - simpledb (SimpleDB) - sns (Simple Notification Service) - sqs (Simple Queue Service) ''' if params is None: params = {} if opts is None: opts = {} function = opts.get('function', (None, product)) providers = opts.get('providers', {}) if provider is None: prov_dict = providers.get(function[1], {}).get(product, {}) if prov_dict: driver = list(list(prov_dict.keys()))[0] provider = providers.get(driver, product) else: prov_dict = providers.get(provider, {}).get(product, {}) service_url = prov_dict.get('service_url', 'amazonaws.com') if not location: location = get_location(opts, provider) if endpoint is None: if not requesturl: endpoint = prov_dict.get( 'endpoint', '{0}.{1}.{2}'.format(product, location, service_url) ) requesturl = 'https://{0}/'.format(endpoint) else: endpoint = urlparse(requesturl).netloc if endpoint == '': endpoint_err = ('Could not find a valid endpoint in the ' 'requesturl: {0}. Looking for something ' 'like https://some.aws.endpoint/?args').format( requesturl ) LOG.error(endpoint_err) if return_url is True: return {'error': endpoint_err}, requesturl return {'error': endpoint_err} LOG.debug('Using AWS endpoint: {0}'.format(endpoint)) method = 'GET' aws_api_version = prov_dict.get( 'aws_api_version', prov_dict.get( '{0}_api_version'.format(product), DEFAULT_AWS_API_VERSION ) ) if sigver == '4': headers, requesturl = sig4( method, endpoint, params, prov_dict, aws_api_version, location, product, requesturl=requesturl ) params_with_headers = {} else: params_with_headers = sig2( method, endpoint, params, prov_dict, aws_api_version ) headers = {} attempts = 5 while attempts > 0: LOG.debug('AWS Request: {0}'.format(requesturl)) LOG.trace('AWS Request Parameters: {0}'.format(params_with_headers)) try: result = requests.get(requesturl, headers=headers, params=params_with_headers) LOG.debug( 'AWS Response Status Code: {0}'.format( result.status_code ) ) LOG.trace( 'AWS Response Text: {0}'.format( result.text ) ) result.raise_for_status() break except requests.exceptions.HTTPError as exc: root = ET.fromstring(exc.response.content) data = xml.to_dict(root) # check to see if we should retry the query err_code = data.get('Errors', {}).get('Error', {}).get('Code', '') if attempts > 0 and err_code and err_code in AWS_RETRY_CODES: attempts -= 1 LOG.error( 'AWS Response Status Code and Error: [{0} {1}] {2}; ' 'Attempts remaining: {3}'.format( exc.response.status_code, exc, data, attempts ) ) # Wait a bit before continuing to prevent throttling time.sleep(2) continue LOG.error( 'AWS Response Status Code and Error: [{0} {1}] {2}'.format( exc.response.status_code, exc, data ) ) if return_url is True: return {'error': data}, requesturl return {'error': data} else: LOG.error( 'AWS Response Status Code and Error: [{0} {1}] {2}'.format( exc.response.status_code, exc, data ) ) if return_url is True: return {'error': data}, requesturl return {'error': data} response = result.text root = ET.fromstring(response) items = root[1] if return_root is True: items = root if setname: if sys.version_info < (2, 7): children_len = len(root.getchildren()) else: children_len = len(root) for item in range(0, children_len): comps = root[item].tag.split('}') if comps[1] == setname: items = root[item] ret = [] for item in items: ret.append(xml.to_dict(item)) if return_url is True: return ret, requesturl return ret
def query(key, keyid, method='GET', params=None, headers=None, requesturl=None, return_url=False, bucket=None, service_url=None, path='', return_bin=False, action=None, local_file=None, verify_ssl=True, location=None, full_headers=False): ''' Perform a query against an S3-like API. This function requires that a secret key and the id for that key are passed in. For instance: s3.keyid: GKTADJGHEIQSXMKKRBJ08H s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs If keyid or key is not specified, an attempt to fetch them from EC2 IAM metadata service will be made. A service_url may also be specified in the configuration: s3.service_url: s3.amazonaws.com If a service_url is not specified, the default is s3.amazonaws.com. This may appear in various documentation as an "endpoint". A comprehensive list for Amazon S3 may be found at:: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region The service_url will form the basis for the final endpoint that is used to query the service. SSL verification may also be turned off in the configuration: s3.verify_ssl: False This is required if using S3 bucket names that contain a period, as these will not match Amazon's S3 wildcard certificates. Certificate verification is enabled by default. A region may be specified: s3.location: eu-central-1 If region is not specified, an attempt to fetch the region from EC2 IAM metadata service will be made. Failing that, default is us-east-1 ''' if not HAS_REQUESTS: log.error('There was an error: requests is required for s3 access') if not headers: headers = {} if not params: params = {} if not service_url: service_url = 's3.amazonaws.com' if bucket: endpoint = '{0}.{1}'.format(bucket, service_url) else: endpoint = service_url # Try grabbing the credentials from the EC2 instance IAM metadata if available if not key or not keyid: key = salt.utils.aws.IROLE_CODE keyid = salt.utils.aws.IROLE_CODE if not location: location = iam.get_iam_region() if not location: location = DEFAULT_LOCATION data = '' if method == 'PUT': if local_file: with salt.utils.fopen(local_file, 'r') as ifile: data = ifile.read() if not requesturl: requesturl = 'https://{0}/{1}'.format(endpoint, path) headers, requesturl = salt.utils.aws.sig4( method, endpoint, params, data=data, uri='/{0}'.format(path), prov_dict={'id': keyid, 'key': key}, location=location, product='s3', requesturl=requesturl, ) log.debug('S3 Request: {0}'.format(requesturl)) log.debug('S3 Headers::') log.debug(' Authorization: {0}'.format(headers['Authorization'])) if not data: data = None try: result = requests.request(method, requesturl, headers=headers, data=data, verify=verify_ssl) response = result.content except requests.exceptions.HTTPError as exc: log.error('There was an error::') if hasattr(exc, 'code') and hasattr(exc, 'msg'): log.error(' Code: {0}: {1}'.format(exc.code, exc.msg)) log.error(' Content: \n{0}'.format(exc.read())) return False log.debug('S3 Response Status Code: {0}'.format(result.status_code)) if method == 'PUT': if result.status_code == 200: if local_file: log.debug('Uploaded from {0} to {1}'.format(local_file, path)) else: log.debug('Created bucket {0}'.format(bucket)) else: if local_file: log.debug('Failed to upload from {0} to {1}: {2}'.format( local_file, path, result.status_code, )) else: log.debug('Failed to create bucket {0}'.format(bucket)) return if method == 'DELETE': if str(result.status_code).startswith('2'): if path: log.debug('Deleted {0} from bucket {1}'.format(path, bucket)) else: log.debug('Deleted bucket {0}'.format(bucket)) else: if path: log.debug('Failed to delete {0} from bucket {1}: {2}'.format( path, bucket, result.status_code, )) else: log.debug('Failed to delete bucket {0}'.format(bucket)) return # This can be used to save a binary object to disk if local_file and method == 'GET': log.debug('Saving to local file: {0}'.format(local_file)) with salt.utils.fopen(local_file, 'w') as out: out.write(response) return 'Saved to local file: {0}'.format(local_file) # This can be used to return a binary object wholesale if return_bin: return response if response: items = ET.fromstring(response) ret = [] for item in items: ret.append(xml.to_dict(item)) if return_url is True: return ret, requesturl else: if result.status_code != requests.codes.ok: return ret = {'headers': []} if full_headers: ret['headers'] = dict(result.headers) else: for header in result.headers: ret['headers'].append(header.strip()) return ret
def query(key, keyid, method='GET', params=None, headers=None, requesturl=None, return_url=False, bucket=None, service_url=None, path=None, return_bin=False, action=None, local_file=None, verify_ssl=True): ''' Perform a query against an S3-like API. This function requires that a secret key and the id for that key are passed in. For instance: s3.keyid: GKTADJGHEIQSXMKKRBJ08H s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs A service_url may also be specified in the configuration:: s3.service_url: s3.amazonaws.com If a service_url is not specified, the default is s3.amazonaws.com. This may appear in various documentation as an "endpoint". A comprehensive list for Amazon S3 may be found at:: http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region The service_url will form the basis for the final endpoint that is used to query the service. SSL verification may also be turned off in the configuration: s3.verify_ssl: False This is required if using S3 bucket names that contain a period, as these will not match Amazon's S3 wildcard certificates. Certificate verification is enabled by default. ''' if not headers: headers = {} if not params: params = {} if path is None: path = '' if not service_url: service_url = 's3.amazonaws.com' if bucket: endpoint = '{0}.{1}'.format(bucket, service_url) else: endpoint = service_url # Try grabbing the credentials from the EC2 instance IAM metadata if available token = None if not key or not keyid: iam_creds = iam.get_iam_metadata() key = iam_creds['secret_key'] keyid = iam_creds['access_key'] token = iam_creds['security_token'] if not requesturl: x_amz_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT') content_type = 'text/plain' if method == 'GET': if bucket: can_resource = '/{0}/{1}'.format(bucket, path) else: can_resource = '/' elif method == 'PUT' or method == 'HEAD' or method == 'DELETE': if path: can_resource = '/{0}/{1}'.format(bucket, path) else: can_resource = '/{0}/'.format(bucket) if action: can_resource += '?{0}'.format(action) log.debug('CanonicalizedResource: {0}'.format(can_resource)) headers['Host'] = endpoint headers['Content-type'] = content_type headers['Date'] = x_amz_date if token: headers['x-amz-security-token'] = token string_to_sign = '{0}\n'.format(method) new_headers = [] for header in sorted(headers): if header.lower().startswith('x-amz'): log.debug(header.lower()) new_headers.append('{0}:{1}'.format(header.lower(), headers[header])) can_headers = '\n'.join(new_headers) log.debug('CanonicalizedAmzHeaders: {0}'.format(can_headers)) string_to_sign += '\n{0}'.format(content_type) string_to_sign += '\n{0}'.format(x_amz_date) if can_headers: string_to_sign += '\n{0}'.format(can_headers) string_to_sign += '\n{0}'.format(can_resource) log.debug('String To Sign:: \n{0}'.format(string_to_sign)) hashed = hmac.new(key, string_to_sign, hashlib.sha1) sig = binascii.b2a_base64(hashed.digest()) headers['Authorization'] = 'AWS {0}:{1}'.format(keyid, sig.strip()) querystring = urllib.urlencode(params) if action: if querystring: querystring = '{0}&{1}'.format(action, querystring) else: querystring = action requesturl = 'https://{0}/'.format(endpoint) if path: requesturl += path if querystring: requesturl += '?{0}'.format(querystring) data = None if method == 'PUT': if local_file: with salt.utils.fopen(local_file, 'r') as ifile: data = ifile.read() log.debug('S3 Request: {0}'.format(requesturl)) log.debug('S3 Headers::') log.debug(' Authorization: {0}'.format(headers['Authorization'])) try: result = requests.request(method, requesturl, headers=headers, data=data, verify=verify_ssl) response = result.content except requests.exceptions.HTTPError as exc: log.error('There was an error::') if hasattr(exc, 'code') and hasattr(exc, 'msg'): log.error(' Code: {0}: {1}'.format(exc.code, exc.msg)) log.error(' Content: \n{0}'.format(exc.read())) return False log.debug('S3 Response Status Code: {0}'.format(result.status_code)) if method == 'PUT': if result.status_code == 200: if local_file: log.debug('Uploaded from {0} to {1}'.format(local_file, path)) else: log.debug('Created bucket {0}'.format(bucket)) else: if local_file: log.debug('Failed to upload from {0} to {1}: {2}'.format( local_file, path, result.status_code, )) else: log.debug('Failed to create bucket {0}'.format(bucket)) return if method == 'DELETE': if str(result.status_code).startswith('2'): if path: log.debug('Deleted {0} from bucket {1}'.format(path, bucket)) else: log.debug('Deleted bucket {0}'.format(bucket)) else: if path: log.debug('Failed to delete {0} from bucket {1}: {2}'.format( path, bucket, result.status_code, )) else: log.debug('Failed to delete bucket {0}'.format(bucket)) return # This can be used to save a binary object to disk if local_file and method == 'GET': log.debug('Saving to local file: {0}'.format(local_file)) with salt.utils.fopen(local_file, 'w') as out: out.write(response) return 'Saved to local file: {0}'.format(local_file) # This can be used to return a binary object wholesale if return_bin: return response if response: items = ET.fromstring(response) ret = [] for item in items: ret.append(xml.to_dict(item)) if return_url is True: return ret, requesturl else: if result.status_code != requests.codes.ok: return ret = {'headers': []} for header in result.headers: ret['headers'].append(header.strip()) return ret
def query(url, method='GET', params=None, data=None, data_file=None, header_dict=None, header_list=None, header_file=None, username=None, password=None, auth=None, decode=False, decode_type='auto', status=False, headers=False, text=False, cookies=None, cookie_jar=JARFILE, cookie_format='lwp', persist_session=False, session_cookie_jar=SESSIONJARFILE, data_render=False, data_renderer=None, header_render=False, header_renderer=None, template_dict=None, test=False, test_url=None, node='minion', port=80, opts=None, requests_lib=None, ca_bundle=None, verify_ssl=None, cert=None, text_out=None, headers_out=None, decode_out=None, stream=False, handle=False, agent=USERAGENT, **kwargs): ''' Query a resource, and decode the return data ''' ret = {} if opts is None: if node == 'master': opts = salt.config.master_config( os.path.join(syspaths.CONFIG_DIR, 'master') ) elif node == 'minion': opts = salt.config.minion_config( os.path.join(syspaths.CONFIG_DIR, 'minion') ) else: opts = {} if requests_lib is None: requests_lib = opts.get('requests_lib', False) if requests_lib is True: if HAS_REQUESTS is False: ret['error'] = ('http.query has been set to use requests, but the ' 'requests library does not seem to be installed') log.error(ret['error']) return ret else: requests_log = logging.getLogger('requests') requests_log.setLevel(logging.WARNING) if ca_bundle is None: ca_bundle = get_ca_bundle(opts) if verify_ssl is None: verify_ssl = opts.get('verify_ssl', True) if cert is None: cert = opts.get('cert', None) if data_file is not None: data = _render( data_file, data_render, data_renderer, template_dict, opts ) log.debug('Using {0} Method'.format(method)) if method == 'POST': log.trace('POST Data: {0}'.format(pprint.pformat(data))) if header_file is not None: header_tpl = _render( header_file, header_render, header_renderer, template_dict, opts ) if isinstance(header_tpl, dict): header_dict = header_tpl else: header_list = header_tpl.splitlines() if header_dict is None: header_dict = {} if header_list is None: header_list = [] if persist_session is True and HAS_MSGPACK: # TODO: This is hackish; it will overwrite the session cookie jar with # all cookies from this one connection, rather than behaving like a # proper cookie jar. Unfortunately, since session cookies do not # contain expirations, they can't be stored in a proper cookie jar. if os.path.isfile(session_cookie_jar): with salt.utils.fopen(session_cookie_jar, 'r') as fh_: session_cookies = msgpack.load(fh_) if isinstance(session_cookies, dict): header_dict.update(session_cookies) else: with salt.utils.fopen(session_cookie_jar, 'w') as fh_: msgpack.dump('', fh_) for header in header_list: comps = header.split(':') if len(comps) < 2: continue header_dict[comps[0].strip()] = comps[1].strip() if username and password: auth = (username, password) else: auth = None if requests_lib is True: sess = requests.Session() sess.auth = auth sess.headers.update(header_dict) log.trace('Request Headers: {0}'.format(sess.headers)) sess_cookies = sess.cookies sess.verify = verify_ssl else: sess_cookies = None if cookies is not None: if cookie_format == 'mozilla': sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar) else: sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar) if not os.path.isfile(cookie_jar): sess_cookies.save() else: sess_cookies.load() if agent == USERAGENT: agent = '{0} http.query()'.format(agent) header_dict['User-agent'] = agent if test is True: if test_url is None: return {} else: url = test_url ret['test'] = True if requests_lib is True: req_kwargs = {} if stream is True: if requests.__version__[0] == '0': # 'stream' was called 'prefetch' before 1.0, with flipped meaning req_kwargs['prefetch'] = False else: req_kwargs['stream'] = True # Client-side cert handling if cert is not None: if isinstance(cert, string_types): if os.path.exists(cert): req_kwargs['cert'] = cert elif isinstance(cert, tuple): if os.path.exists(cert[0]) and os.path.exists(cert[1]): req_kwargs['cert'] = cert else: log.error('The client-side certificate path that was passed is ' 'not valid: {0}'.format(cert)) result = sess.request( method, url, params=params, data=data, **req_kwargs ) result.raise_for_status() if stream is True or handle is True: return {'handle': result} result_status_code = result.status_code result_headers = result.headers result_text = result.text result_cookies = result.cookies else: request = urllib_request.Request(url, data) handlers = [ urllib_request.HTTPHandler, urllib_request.HTTPCookieProcessor(sess_cookies) ] if url.startswith('https') or port == 443: if not HAS_MATCHHOSTNAME: log.warn(('match_hostname() not available, SSL hostname checking ' 'not available. THIS CONNECTION MAY NOT BE SECURE!')) elif verify_ssl is False: log.warn(('SSL certificate verification has been explicitly ' 'disabled. THIS CONNECTION MAY NOT BE SECURE!')) else: hostname = request.get_host() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((hostname, 443)) sockwrap = ssl.wrap_socket( sock, ca_certs=ca_bundle, cert_reqs=ssl.CERT_REQUIRED ) try: match_hostname(sockwrap.getpeercert(), hostname) except CertificateError as exc: ret['error'] = ( 'The certificate was invalid. ' 'Error returned was: {0}'.format( pprint.pformat(exc) ) ) return ret # Client-side cert handling if cert is not None: cert_chain = None if isinstance(cert, string_types): if os.path.exists(cert): cert_chain = (cert) elif isinstance(cert, tuple): if os.path.exists(cert[0]) and os.path.exists(cert[1]): cert_chain = cert else: log.error('The client-side certificate path that was ' 'passed is not valid: {0}'.format(cert)) return if hasattr(ssl, 'SSLContext'): # Python >= 2.7.9 context = ssl.SSLContext.load_cert_chain(*cert_chain) handlers.append(urllib_request.HTTPSHandler(context=context)) # pylint: disable=E1123 else: # Python < 2.7.9 cert_kwargs = { 'host': request.get_host(), 'port': port, 'cert_file': cert_chain[0] } if len(cert_chain) > 1: cert_kwargs['key_file'] = cert_chain[1] handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs) opener = urllib_request.build_opener(*handlers) for header in header_dict: request.add_header(header, header_dict[header]) request.get_method = lambda: method result = opener.open(request) if stream is True or handle is True: return {'handle': result} result_status_code = result.code result_headers = result.headers.headers result_text = result.read() if isinstance(result_headers, list): result_headers_dict = {} for header in result_headers: comps = header.split(':') result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip() result_headers = result_headers_dict log.debug('Response Status Code: {0}'.format(result_status_code)) log.trace('Response Headers: {0}'.format(result_headers)) log.trace('Response Cookies: {0}'.format(sess_cookies)) try: log.trace('Response Text: {0}'.format(result_text)) except UnicodeEncodeError as exc: log.trace(('Cannot Trace Log Response Text: {0}. This may be due to ' 'incompatibilities between requests and logging.').format(exc)) if text_out is not None and os.path.exists(text_out): with salt.utils.fopen(text_out, 'w') as tof: tof.write(result_text) if headers_out is not None and os.path.exists(headers_out): with salt.utils.fopen(headers_out, 'w') as hof: hof.write(result_headers) if cookies is not None: sess_cookies.save() if persist_session is True and HAS_MSGPACK: # TODO: See persist_session above if 'set-cookie' in result_headers: with salt.utils.fopen(session_cookie_jar, 'w') as fh_: session_cookies = result_headers.get('set-cookie', None) if session_cookies is not None: msgpack.dump({'Cookie': session_cookies}, fh_) else: msgpack.dump('', fh_) if status is True: ret['status'] = result_status_code if headers is True: ret['headers'] = result_headers if decode is True: if decode_type == 'auto': content_type = result_headers.get( 'content-type', 'application/json' ) if 'xml' in content_type: decode_type = 'xml' elif 'json' in content_type: decode_type = 'json' else: decode_type = 'plain' valid_decodes = ('json', 'xml', 'plain') if decode_type not in valid_decodes: ret['error'] = ( 'Invalid decode_type specified. ' 'Valid decode types are: {0}'.format( pprint.pformat(valid_decodes) ) ) log.error(ret['error']) return ret if decode_type == 'json': ret['dict'] = json.loads(result_text) elif decode_type == 'xml': ret['dict'] = [] items = ET.fromstring(result_text) for item in items: ret['dict'].append(xml.to_dict(item)) else: text = True if decode_out and os.path.exists(decode_out): with salt.utils.fopen(decode_out, 'w') as dof: dof.write(result_text) if text is True: ret['text'] = result_text return ret