Example #1
File: parallels.py Project: hulu/salt
def query(action=None, command=None, args=None, method="GET", data=None):
    """
    Make a web call to a Parallels provider
    """
    path = config.get_config_value("url", get_configured_provider(), __opts__, search_global=False)
    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm="Parallels Instance Manager",
        uri=path,
        user=config.get_config_value("user", get_configured_provider(), __opts__, search_global=False),
        passwd=config.get_config_value("password", get_configured_provider(), __opts__, search_global=False),
    )
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)

    if action:
        path += action

    if command:
        path += "/{0}".format(command)

    if type(args) is not dict:
        args = {}

    kwargs = {"data": data}
    if type(data) is str and "<?xml" in data:
        kwargs["headers"] = {"Content-type": "application/xml"}

    if args:
        path += "?%s"
        params = urllib.urlencode(args)
        req = urllib2.Request(url=path % params, **kwargs)
    else:
        req = urllib2.Request(url=path, **kwargs)

    req.get_method = lambda: method

    log.debug("{0} {1}".format(method, req.get_full_url()))
    if data:
        log.debug(data)

    try:
        result = urllib2.urlopen(req)
        log.debug("PARALLELS Response Status Code: {0}".format(result.getcode()))

        if "content-length" in result.headers:
            content = result.read()
            result.close()
            items = ET.fromstring(content)
            return items

        return {}
    except urllib2.URLError as exc:
        log.error("PARALLELS Response Status Code: {0} {1}".format(exc.code, exc.msg))
        root = ET.fromstring(exc.read())
        log.error(root)
        return {"error": root}
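
For reference, the pattern above boils down to parsing the HTTP response body with ET.fromstring and handing the resulting Element back to the caller. A minimal, standalone sketch; the <ve-list> payload below is made up for illustration and is not the real Parallels schema:

import xml.etree.ElementTree as ET

sample = """<ve-list>
    <ve><name>demo-01</name><state>RUNNING</state></ve>
    <ve><name>demo-02</name><state>STOPPED</state></ve>
</ve-list>"""

items = ET.fromstring(sample)          # an Element, as query() returns above
for ve in items.findall('ve'):
    print(ve.findtext('name'), ve.findtext('state'))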
Example #2
File: boto_cfn.py Project: iquaba/salt
def _get_error(error):
    # Converts boto exception to string that can be used to output error.
    error = '\n'.join(error.split('\n')[1:])
    error = ET.fromstring(error)
    code = error[0][1].text
    message = error[0][2].text
    return code, message
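
The hard-coded indexing (error[0][1] and error[0][2]) assumes a boto-style ErrorResponse body whose first child is <Error>, with <Type>, <Code> and <Message> as its children. A minimal sketch with a made-up payload:

import xml.etree.ElementTree as ET

sample = '''<ErrorResponse>
    <Error>
        <Type>Sender</Type>
        <Code>ValidationError</Code>
        <Message>Stack with id foo does not exist</Message>
    </Error>
    <RequestId>12345678-aaaa-bbbb-cccc-123456789012</RequestId>
</ErrorResponse>'''

error = ET.fromstring(sample)
print(error[0][1].text)  # ValidationError -> returned as `code`
print(error[0][2].text)  # Stack with id foo does not exist -> returned as `message`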
Example #3
File: bamboohr.py Project: DaveQB/salt
def list_users(order_by='id'):
    '''
    Show all users for this company.

    CLI Example:

        salt myminion bamboohr.list_users

    By default, the return data will be keyed by ID. However, it can be keyed
    by any other field. Keep in mind that if the chosen field contains
    duplicate values (e.g. location, for a company which only has one
    location), then each duplicate will overwrite the previous one. Therefore,
    it is advisable to only key by fields that are guaranteed to be unique.

    CLI Examples:

        salt myminion bamboohr.list_users order_by=id
        salt myminion bamboohr.list_users order_by=email
    '''
    ret = {}
    status, result = _query(action='meta', command='users')
    root = ET.fromstring(result)
    users = root.getchildren()
    for user in users:
        user_id = None
        user_ret = {}
        for item in user.items():
            user_ret[item[0]] = item[1]
            if item[0] == 'id':
                user_id = item[1]
        for item in user.getchildren():
            user_ret[item.tag] = item.text
        ret[user_ret[order_by]] = user_ret
    return ret
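
The traversal above collects both XML attributes (user.items()) and child elements (user.getchildren()) into one flat dict per user. A standalone sketch with a hypothetical BambooHR-style payload:

import xml.etree.ElementTree as ET

sample = '''<users>
    <user id="42" employeeId="7">
        <firstName>Ada</firstName>
        <lastName>Lovelace</lastName>
        <email>ada@example.com</email>
    </user>
</users>'''

root = ET.fromstring(sample)
ret = {}
for user in root:                        # getchildren() is the legacy Python 2 spelling
    user_ret = dict(user.items())        # attributes: id, employeeId
    user_ret.update((item.tag, item.text) for item in user)
    ret[user_ret['id']] = user_ret
print(ret['42']['email'])                # ada@example.com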
Example #4
File: bamboohr.py Project: DaveQB/salt
def list_employees(order_by='id'):
    '''
    Show all employees for this company.

    CLI Example:

        salt myminion bamboohr.list_employees

    By default, the return data will be keyed by ID. However, it can be keyed
    by any other field. Keep in mind that if the chosen field contains
    duplicate values (e.g. location, for a company which only has one
    location), then each duplicate will overwrite the previous one. Therefore,
    it is advisable to only key by fields that are guaranteed to be unique.

    CLI Examples:

        salt myminion bamboohr.list_employees order_by=id
        salt myminion bamboohr.list_employees order_by=displayName
        salt myminion bamboohr.list_employees order_by=workEmail
    '''
    ret = {}
    status, result = _query(action='employees', command='directory')
    root = ET.fromstring(result)
    directory = root.getchildren()
    for cat in directory:
        if cat.tag != 'employees':
            continue
        for item in cat:
            emp_id = item.items()[0][1]
            emp_ret = {'id': emp_id}
            for details in item.getchildren():
                emp_ret[details.items()[0][1]] = details.text
            ret[emp_ret[order_by]] = emp_ret
    return ret
Example #5
File: virt_test.py Project: DaveQB/salt
    def test_gen_vol_xml_for_esxi(self):
        xml_data = virt._gen_vol_xml('vmname', 'system', 8192, 'esxi')
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('name').text, 'vmname/system.vmdk')
        self.assertEqual(root.find('key').text, 'vmname/system')
        self.assertEqual(root.find('capacity').attrib['unit'], 'KiB')
        self.assertEqual(root.find('capacity').text, str(8192 * 1024))
Example #6
File: parallels.py Project: hulu/salt
def create_node(vm_):
    """
    Build and submit the XML to create a node
    """
    # Start the tree
    content = ET.Element("ve")

    # Name of the instance
    name = ET.SubElement(content, "name")
    name.text = vm_["name"]

    # Description, defaults to name
    desc = ET.SubElement(content, "description")
    desc.text = config.get_config_value("desc", vm_, __opts__, default=vm_["name"], search_global=False)

    # How many CPU cores, and how fast they are
    cpu = ET.SubElement(content, "cpu")
    cpu.attrib["number"] = config.get_config_value("cpu_number", vm_, __opts__, default="1", search_global=False)
    cpu.attrib["power"] = config.get_config_value("cpu_power", vm_, __opts__, default="1000", search_global=False)

    # How many megabytes of RAM
    ram = ET.SubElement(content, "ram-size")
    ram.text = config.get_config_value("ram", vm_, __opts__, default="256", search_global=False)

    # Bandwidth available, in kbps
    bandwidth = ET.SubElement(content, "bandwidth")
    bandwidth.text = config.get_config_value("bandwidth", vm_, __opts__, default="100", search_global=False)

    # How many public IPs will be assigned to this instance
    ip_num = ET.SubElement(content, "no-of-public-ip")
    ip_num.text = config.get_config_value("ip_num", vm_, __opts__, default="1", search_global=False)

    # Size of the instance disk
    disk = ET.SubElement(content, "ve-disk")
    disk.attrib["local"] = "true"
    disk.attrib["size"] = config.get_config_value("disk_size", vm_, __opts__, default="10", search_global=False)

    # Attributes for the image
    vm_image = config.get_config_value("image", vm_, __opts__, search_global=False)
    image = show_image({"image": vm_image}, call="function")
    platform = ET.SubElement(content, "platform")
    template = ET.SubElement(platform, "template-info")
    template.attrib["name"] = vm_image
    os_info = ET.SubElement(platform, "os-info")
    os_info.attrib["technology"] = image[vm_image]["technology"]
    os_info.attrib["type"] = image[vm_image]["osType"]

    # Username and password
    admin = ET.SubElement(content, "admin")
    admin.attrib["login"] = config.get_config_value("ssh_username", vm_, __opts__, default="root")
    admin.attrib["password"] = config.get_config_value("password", vm_, __opts__, search_global=False)

    data = ET.tostring(content, encoding="UTF-8")

    salt.cloud.utils.fire_event(
        "event", "requesting instance", "salt/cloud/{0}/requesting".format(vm_["name"]), {"kwargs": data}
    )

    node = query(action="ve", method="POST", data=data)
    return node
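
The resulting request body is just the serialized element tree. A minimal reproduction of the build-and-serialize pattern with made-up values, only to show what ET.tostring() emits; the real <ve> document carries many more elements:

import xml.etree.ElementTree as ET

content = ET.Element('ve')
ET.SubElement(content, 'name').text = 'demo-01'
cpu = ET.SubElement(content, 'cpu')
cpu.attrib['number'] = '1'
cpu.attrib['power'] = '1000'

data = ET.tostring(content, encoding='UTF-8')
print(data)   # bytes payload with an XML declaration and the nested <name>/<cpu> elements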
Example #7
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers):
    metadata_xml = _get_artifact_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, headers=headers)
    root = ET.fromstring(metadata_xml)

    assert group_id == root.find('groupId').text
    assert artifact_id == root.find('artifactId').text
    latest_version = root.find('versioning').find('latest').text
    return {
        'latest_version': latest_version
    }
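
The lookups above assume a standard maven-metadata.xml layout with groupId, artifactId and a versioning/latest element. A trimmed, made-up example:

import xml.etree.ElementTree as ET

metadata_xml = '''<metadata>
    <groupId>com.example</groupId>
    <artifactId>my-app</artifactId>
    <versioning>
        <latest>1.4.0</latest>
        <release>1.3.2</release>
    </versioning>
</metadata>'''

root = ET.fromstring(metadata_xml)
print(root.find('versioning').find('latest').text)   # 1.4.0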
Example #8
def mksls(src, dst=None):
    '''
    Convert an AutoYAST file to an SLS file
    '''
    with salt.utils.fopen(src, 'r') as fh_:
        ps_opts = xml.to_dict(ET.fromstring(fh_.read()))

    if dst is not None:
        with salt.utils.fopen(dst, 'w') as fh_:
            fh_.write(yaml.safe_dump(ps_opts, default_flow_style=False))
    else:
        return yaml.safe_dump(ps_opts, default_flow_style=False)
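
Salt's xml.to_dict helper does the XML-to-dict conversion; the rough, self-contained stand-in below (simplified and hypothetical, keeping only tags and text) shows the ET.fromstring -> dict -> YAML round trip that mksls() performs, assuming PyYAML is available:

import xml.etree.ElementTree as ET
import yaml

def _to_dict(elem):
    # simplified converter: nested children win over text
    children = list(elem)
    if not children:
        return elem.text
    return {child.tag: _to_dict(child) for child in children}

doc = ET.fromstring('<profile><general><mode><confirm>false</confirm></mode></general></profile>')
print(yaml.safe_dump(_to_dict(doc), default_flow_style=False))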
Example #9
File: virt_test.py Project: DaveQB/salt
    def test_boot_default_dev(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm'
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('os/boot').attrib['dev'], 'hd')
Example #10
def _get_artifact_metadata(artifactory_url, repository, group_id, artifact_id, headers):
    metadata_xml = _get_artifact_metadata_xml(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        headers=headers,
    )
    root = ET.fromstring(metadata_xml)

    assert group_id == root.find("groupId").text
    assert artifact_id == root.find("artifactId").text
    latest_version = root.find("versioning").find("latest").text
    return {"latest_version": latest_version}
Example #11
File: virt_test.py Project: DaveQB/salt
    def test_controller_for_kvm(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm'
            )
        root = ET.fromstring(xml_data)
        controllers = root.findall('.//devices/controller')
        # There should be no controller
        self.assertTrue(len(controllers) == 0)
Example #12
File: virt_test.py Project: DaveQB/salt
    def test_boot_multiple_devs(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            boot_dev='cdrom network'
            )
        root = ET.fromstring(xml_data)
        devs = root.findall('.//boot')
        self.assertTrue(len(devs) == 2)
Example #13
File: virt_test.py Project: DaveQB/salt
    def test_controller_for_esxi(self):
        diskp = virt._disk_profile('default', 'esxi')
        nicp = virt._nic_profile('default', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi'
            )
        root = ET.fromstring(xml_data)
        controllers = root.findall('.//devices/controller')
        self.assertTrue(len(controllers) == 1)
        controller = controllers[0]
        self.assertEqual(controller.attrib['model'], 'lsilogic')
Example #14
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_serial_console(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='pty',
            console=True
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'pty')
        self.assertEqual(root.find('devices/console').attrib['type'], 'pty')
Example #15
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_telnet_console_unspecified_port(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            serial_type='tcp',
            console=True
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.find('devices/serial').attrib['type'], 'tcp')
        self.assertEqual(root.find('devices/console').attrib['type'], 'tcp')
        self.assertIsInstance(int(root.find('devices/console/source').attrib['service']), int)
Example #16
def get_config(instance=_DEFAULT_INSTANCE):
    '''
    Determine the configuration of the provided instance.

    :param str instance: The name of the Tentacle instance.

    :return: A dictionary containing the configuration data.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' octopus_tentacle.get_config instance='Tentacle'
    '''
    ret = dict()
    name_mapping = {'Octopus.Home': 'home_path',
                    'Octopus.Communications.Squid': 'squid',
                    'Tentacle.CertificateThumbprint': 'thumbprint',
                    'Tentacle.Communication.TrustedOctopusServers': 'servers',
                    'Tentacle.Deployment.ApplicationDirectory': 'app_path',
                    'Tentacle.Services.NoListen': 'comms',
                    'Tentacle.Services.PortNumber': 'port'}

    config_path = get_config_path(instance)

    if not os.path.isfile(config_path):
        _LOG.error('Unable to get configuration file for instance: %s', instance)
        return ret

    with salt.utils.fopen(config_path, 'r') as fh_:
        config = _parse_config(ElementTree.fromstring(fh_.read()))

    for item in config:
        # Skip keys that we aren't specifically looking for.
        if item in name_mapping:
            # Convert the NoListen value to a friendly value.
            if name_mapping[item] == 'comms':
                for comms_style in _COMMS_STYLES:
                    if config[item] == _COMMS_STYLES[comms_style]:
                        ret[name_mapping[item]] = comms_style
                        break
            else:
                ret[name_mapping[item]] = config[item]

    return ret
Example #17
def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifact_id, version, headers):
    metadata_xml = _get_snapshot_version_metadata_xml(artifactory_url=artifactory_url, repository=repository, group_id=group_id, artifact_id=artifact_id, version=version, headers=headers)
    metadata = ET.fromstring(metadata_xml)

    assert group_id == metadata.find('groupId').text
    assert artifact_id == metadata.find('artifactId').text
    assert version == metadata.find('version').text

    snapshot_versions = metadata.find('versioning').find('snapshotVersions')
    extension_version_dict = {}
    for snapshot_version in snapshot_versions:
        extension = snapshot_version.find('extension').text
        value = snapshot_version.find('value').text
        extension_version_dict[extension] = value

    return {
        'snapshot_versions': extension_version_dict
    }
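
Here the iteration walks versioning/snapshotVersions and builds an extension -> value mapping. A trimmed, made-up snapshot maven-metadata.xml for illustration:

import xml.etree.ElementTree as ET

metadata_xml = '''<metadata>
    <groupId>com.example</groupId>
    <artifactId>my-app</artifactId>
    <version>1.1-SNAPSHOT</version>
    <versioning>
        <snapshotVersions>
            <snapshotVersion>
                <extension>jar</extension>
                <value>1.1-20240101.120000-3</value>
            </snapshotVersion>
            <snapshotVersion>
                <extension>pom</extension>
                <value>1.1-20240101.120000-3</value>
            </snapshotVersion>
        </snapshotVersions>
    </versioning>
</metadata>'''

metadata = ET.fromstring(metadata_xml)
snapshot_versions = metadata.find('versioning').find('snapshotVersions')
print({sv.find('extension').text: sv.find('value').text for sv in snapshot_versions})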
Example #18
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_esxi_custom_profile(self, disk_profile, nic_profile):
        diskp_yaml = '''
- first:
    size: 8192
    format: vmdk
    model: scsi
    pool: datastore1
- second:
    size: 4096
    format: vmdk  # FIX remove line, currently test fails
    model: scsi   # FIX remove line, currently test fails
    pool: datastore2
'''
        nicp_yaml = '''
- type: bridge
  name: eth1
  source: ONENET
  model: e1000
  mac: '00:00:00:00:00:00'
- name: eth2
  type: bridge
  source: TWONET
  model: e1000
  mac: '00:00:00:00:00:00'
'''
        disk_profile.return_value = yaml.load(diskp_yaml)
        nic_profile.return_value = yaml.load(nicp_yaml)
        diskp = virt._disk_profile('noeffect', 'esxi')
        nicp = virt._nic_profile('noeffect', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'vmware')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
        self.assertTrue(len(root.findall('.//disk')) == 2)
        self.assertTrue(len(root.findall('.//interface')) == 2)
Example #19
File: ilo.py Project: mahak/salt
def __execute_cmd(name, xml):
    '''
    Execute ilom commands
    '''
    ret = {name.replace('_', ' '): {}}
    id_num = 0

    tmp_dir = os.path.join(__opts__['cachedir'], 'tmp')
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
    with tempfile.NamedTemporaryFile(dir=tmp_dir,
                                     prefix=name+str(os.getpid()),
                                     suffix='.xml',
                                     delete=False) as fh:
        tmpfilename = fh.name
        fh.write(xml)

    cmd = __salt__['cmd.run_all']('hponcfg -f {0}'.format(tmpfilename))

    # Clean up the temp file
    __salt__['file.remove'](tmpfilename)

    if cmd['retcode'] != 0:
        for i in cmd['stderr'].splitlines():
            if i.startswith('     MESSAGE='):
                return {'Failed': i.split('=')[-1]}
        return False

    try:
        for i in ET.fromstring(''.join(cmd['stdout'].splitlines()[3:-1])):
            # Make sure dict keys don't collide
            if ret[name.replace('_', ' ')].get(i.tag, False):
                ret[name.replace('_', ' ')].update(
                    {i.tag + '_' + str(id_num): i.attrib}
                )
                id_num += 1
            else:
                ret[name.replace('_', ' ')].update(
                    {i.tag: i.attrib}
                )
    except SyntaxError:
        return True

    return ret
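
The tag-collision guard matters because iterating the parsed tree can yield repeated child tags that would otherwise overwrite each other in the result dict. A standalone sketch with a made-up hponcfg-style body:

import xml.etree.ElementTree as ET

body = '<RIBCL><RESPONSE STATUS="0x0000"/><RESPONSE STATUS="0x0001"/></RIBCL>'
ret = {'get host data': {}}
id_num = 0
for i in ET.fromstring(body):
    if ret['get host data'].get(i.tag, False):
        ret['get host data'][i.tag + '_' + str(id_num)] = i.attrib
        id_num += 1
    else:
        ret['get host data'][i.tag] = i.attrib
print(ret)   # {'get host data': {'RESPONSE': {'STATUS': '0x0000'}, 'RESPONSE_0': {'STATUS': '0x0001'}}}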
Example #20
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_kvm_custom_profile(self, disk_profile, nic_profile):
        diskp_yaml = '''
- first:
    size: 8192
    format: qcow2
    model: virtio
    pool: /var/lib/images
- second:
    size: 4096
    format: qcow2   # FIX remove line, currently test fails
    model: virtio   # FIX remove line, currently test fails
    pool: /var/lib/images
'''
        nicp_yaml = '''
- type: bridge
  name: eth1
  source: b2
  model: virtio
  mac: '00:00:00:00:00:00'
- name: eth2
  type: bridge
  source: b2
  model: virtio
  mac: '00:00:00:00:00:00'
'''
        disk_profile.return_value = yaml.load(diskp_yaml)
        nic_profile.return_value = yaml.load(nicp_yaml)
        diskp = virt._disk_profile('noeffect', 'kvm')
        nicp = virt._nic_profile('noeffect', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'kvm')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')
        self.assertTrue(len(root.findall('.//disk')) == 2)
        self.assertTrue(len(root.findall('.//interface')) == 2)
Example #21
File: bamboohr.py Project: DaveQB/salt
def list_meta_fields():
    '''
    Show all meta data fields for this company.

    CLI Example:

        salt myminion bamboohr.list_meta_fields
    '''
    ret = {}
    status, result = _query(action='meta', command='fields')
    root = ET.fromstring(result)
    fields = root.getchildren()
    for field in fields:
        field_id = None
        field_ret = {'name': field.text}
        for item in field.items():
            field_ret[item[0]] = item[1]
            if item[0] == 'id':
                field_id = item[1]
        ret[field_id] = field_ret
    return ret
Example #22
def _get_snapshot_version_metadata(artifactory_url, repository, group_id, artifact_id, version, headers):
    metadata_xml = _get_snapshot_version_metadata_xml(
        artifactory_url=artifactory_url,
        repository=repository,
        group_id=group_id,
        artifact_id=artifact_id,
        version=version,
        headers=headers,
    )
    metadata = ET.fromstring(metadata_xml)

    assert group_id == metadata.find("groupId").text
    assert artifact_id == metadata.find("artifactId").text
    assert version == metadata.find("version").text

    snapshot_versions = metadata.find("versioning").find("snapshotVersions")
    extension_version_dict = {}
    for snapshot_version in snapshot_versions:
        extension = snapshot_version.find("extension").text
        value = snapshot_version.find("value").text
        extension_version_dict[extension] = value

    return {"snapshot_versions": extension_version_dict}
Example #23
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_kvm_default_profile(self):
        diskp = virt._disk_profile('default', 'kvm')
        nicp = virt._nic_profile('default', 'kvm')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'kvm',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'kvm')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')

        disks = root.findall('.//disk')
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue(disk.find('source').attrib['file'].startswith('/'))
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'vda')
        self.assertEqual(disk.find('target').attrib['bus'], 'virtio')
        self.assertEqual(disk.find('driver').attrib['name'], 'qemu')
        self.assertEqual(disk.find('driver').attrib['type'], 'qcow2')

        interfaces = root.findall('.//interface')
        self.assertEqual(len(interfaces), 1)
        iface = interfaces[0]
        self.assertEqual(iface.attrib['type'], 'bridge')
        self.assertEqual(iface.find('source').attrib['bridge'], 'br0')
        self.assertEqual(iface.find('model').attrib['type'], 'virtio')

        mac = iface.find('mac').attrib['address']
        self.assertTrue(
              re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
Example #24
File: virt_test.py Project: DaveQB/salt
    def test_gen_xml_for_esxi_default_profile(self):
        diskp = virt._disk_profile('default', 'esxi')
        nicp = virt._nic_profile('default', 'esxi')
        xml_data = virt._gen_xml(
            'hello',
            1,
            512,
            diskp,
            nicp,
            'esxi',
            )
        root = ET.fromstring(xml_data)
        self.assertEqual(root.attrib['type'], 'vmware')
        self.assertEqual(root.find('vcpu').text, '1')
        self.assertEqual(root.find('memory').text, str(512 * 1024))
        self.assertEqual(root.find('memory').attrib['unit'], 'KiB')

        disks = root.findall('.//disk')
        self.assertEqual(len(disks), 1)
        disk = disks[0]
        self.assertTrue('[0]' in disk.find('source').attrib['file'])
        self.assertTrue('hello/system' in disk.find('source').attrib['file'])
        self.assertEqual(disk.find('target').attrib['dev'], 'sda')
        self.assertEqual(disk.find('target').attrib['bus'], 'scsi')
        self.assertEqual(disk.find('address').attrib['unit'], '0')

        interfaces = root.findall('.//interface')
        self.assertEqual(len(interfaces), 1)
        iface = interfaces[0]
        self.assertEqual(iface.attrib['type'], 'bridge')
        self.assertEqual(iface.find('source').attrib['bridge'], 'DEFAULT')
        self.assertEqual(iface.find('model').attrib['type'], 'e1000')

        mac = iface.find('mac').attrib['address']
        self.assertTrue(
              re.match('^([0-9A-F]{2}[:-]){5}([0-9A-F]{2})$', mac, re.I))
Example #25
def test_update_hostdev_changes(running, live, make_mock_device, make_mock_vm,
                                test):
    """
    Test the virt.update function with host device changes
    """
    xml_def = """
        <domain type='kvm'>
          <name>my_vm</name>
          <memory unit='KiB'>524288</memory>
          <currentMemory unit='KiB'>524288</currentMemory>
          <vcpu placement='static'>1</vcpu>
          <os>
            <type arch='x86_64'>hvm</type>
          </os>
          <on_reboot>restart</on_reboot>
          <devices>
            <hostdev mode='subsystem' type='pci' managed='yes'>
              <source>
                <address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
              </source>
              <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
            </hostdev>
          </devices>
        </domain>"""
    domain_mock = make_mock_vm(xml_def, running)

    make_mock_device("""
        <device>
          <name>usb_3_1_3</name>
          <path>/sys/devices/pci0000:00/0000:00:1d.6/0000:06:00.0/0000:07:02.0/0000:3e:00.0/usb3/3-1/3-1.3</path>
          <devnode type='dev'>/dev/bus/usb/003/004</devnode>
          <parent>usb_3_1</parent>
          <driver>
            <name>usb</name>
          </driver>
          <capability type='usb_device'>
            <bus>3</bus>
            <device>4</device>
            <product id='0x6006'>AUKEY PC-LM1E Camera</product>
            <vendor id='0x0458'>KYE Systems Corp. (Mouse Systems)</vendor>
          </capability>
        </device>
    """)

    make_mock_device("""
            <device>
              <name>pci_1002_71c4</name>
              <parent>pci_8086_27a1</parent>
              <capability type='pci'>
                <class>0xffffff</class>
                <domain>0</domain>
                <bus>1</bus>
                <slot>0</slot>
                <function>0</function>
                <product id='0x71c4'>M56GL [Mobility FireGL V5200]</product>
                <vendor id='0x1002'>ATI Technologies Inc</vendor>
                <numa node='1'/>
              </capability>
            </device>
        """)

    ret = virt.update("my_vm",
                      host_devices=["usb_3_1_3"],
                      test=test,
                      live=live)
    define_mock = virt.libvirt.openAuth().defineXML
    assert_called(define_mock, not test)

    # Test that the XML is updated with the proper devices
    usb_device_xml = strip_xml("""
        <hostdev mode="subsystem" type="usb">
          <source>
           <vendor id="0x0458" />
           <product id="0x6006" />
          </source>
        </hostdev>
        """)
    if not test:
        set_xml = ET.fromstring(define_mock.call_args[0][0])
        actual_hostdevs = [
            ET.tostring(xmlutil.strip_spaces(node))
            for node in set_xml.findall("./devices/hostdev")
        ]
        assert [usb_device_xml] == actual_hostdevs

    if not test and live:
        attach_xml = strip_xml(domain_mock.attachDevice.call_args[0][0])
        assert usb_device_xml == attach_xml

        pci_device_xml = strip_xml("""
                <hostdev mode='subsystem' type='pci' managed='yes'>
                  <source>
                    <address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
                  </source>
                  <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
                </hostdev>
            """)
        detach_xml = strip_xml(domain_mock.detachDevice.call_args[0][0])
        assert pci_device_xml == detach_xml
    else:
        domain_mock.attachDevice.assert_not_called()
        domain_mock.detachDevice.assert_not_called()
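
For reference, the assertion step boils down to parsing the XML handed to defineXML and collecting its <hostdev> children. A standalone sketch; strip_xml and xmlutil.strip_spaces, which normalize whitespace in the real test, are left out here:

import xml.etree.ElementTree as ET

xml_def = """<domain type='kvm'>
    <devices>
        <hostdev mode='subsystem' type='usb'>
            <source><vendor id='0x0458'/><product id='0x6006'/></source>
        </hostdev>
    </devices>
</domain>"""

set_xml = ET.fromstring(xml_def)
hostdevs = [ET.tostring(node) for node in set_xml.findall('./devices/hostdev')]
print(len(hostdevs))   # 1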
Example #26
File: http.py Project: zyuhu/salt
def query(url,
          method='GET',
          params=None,
          data=None,
          data_file=None,
          header_dict=None,
          header_list=None,
          header_file=None,
          username=None,
          password=None,
          auth=None,
          decode=False,
          decode_type='auto',
          status=False,
          headers=False,
          text=False,
          cookies=None,
          cookie_jar=None,
          cookie_format='lwp',
          persist_session=False,
          session_cookie_jar=None,
          data_render=False,
          data_renderer=None,
          header_render=False,
          header_renderer=None,
          template_dict=None,
          test=False,
          test_url=None,
          node='minion',
          port=80,
          opts=None,
          backend=None,
          ca_bundle=None,
          verify_ssl=None,
          cert=None,
          text_out=None,
          headers_out=None,
          decode_out=None,
          stream=False,
          streaming_callback=None,
          header_callback=None,
          handle=False,
          agent=USERAGENT,
          hide_fields=None,
          raise_error=True,
          **kwargs):
    '''
    Query a resource, and decode the return data
    '''
    ret = {}

    if opts is None:
        if node == 'master':
            opts = salt.config.master_config(
                os.path.join(salt.syspaths.CONFIG_DIR, 'master')
            )
        elif node == 'minion':
            opts = salt.config.minion_config(
                os.path.join(salt.syspaths.CONFIG_DIR, 'minion')
            )
        else:
            opts = {}

    if not backend:
        backend = opts.get('backend', 'tornado')

    match = re.match(r'https?://((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)($|/)', url)
    if not match:
        salt.utils.network.refresh_dns()

    if backend == 'requests':
        if HAS_REQUESTS is False:
            ret['error'] = ('http.query has been set to use requests, but the '
                            'requests library does not seem to be installed')
            log.error(ret['error'])
            return ret
        else:
            requests_log = logging.getLogger('requests')
            requests_log.setLevel(logging.WARNING)

    # Some libraries don't support separation of url and GET parameters
    # Don't need a try/except block, since Salt depends on tornado
    url_full = tornado.httputil.url_concat(url, params) if params else url

    if ca_bundle is None:
        ca_bundle = get_ca_bundle(opts)

    if verify_ssl is None:
        verify_ssl = opts.get('verify_ssl', True)

    if cert is None:
        cert = opts.get('cert', None)

    if data_file is not None:
        data = _render(
            data_file, data_render, data_renderer, template_dict, opts
        )

    # Make sure no secret fields show up in logs
    log_url = sanitize_url(url_full, hide_fields)

    log.debug('Requesting URL %s using %s method', log_url, method)
    log.debug("Using backend: %s", backend)

    if method == 'POST' and log.isEnabledFor(logging.TRACE):
        # Make sure no secret fields show up in logs
        if isinstance(data, dict):
            log_data = data.copy()
            if isinstance(hide_fields, list):
                for item in data:
                    for field in hide_fields:
                        if item == field:
                            log_data[item] = 'XXXXXXXXXX'
            log.trace('Request POST Data: %s', pprint.pformat(log_data))
        else:
            log.trace('Request POST Data: %s', pprint.pformat(data))

    if header_file is not None:
        header_tpl = _render(
            header_file, header_render, header_renderer, template_dict, opts
        )
        if isinstance(header_tpl, dict):
            header_dict = header_tpl
        else:
            header_list = header_tpl.splitlines()

    if header_dict is None:
        header_dict = {}

    if header_list is None:
        header_list = []

    if cookie_jar is None:
        cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.txt')
    if session_cookie_jar is None:
        session_cookie_jar = os.path.join(opts.get('cachedir', salt.syspaths.CACHE_DIR), 'cookies.session.p')

    if persist_session is True and HAS_MSGPACK:
        # TODO: This is hackish; it will overwrite the session cookie jar with
        # all cookies from this one connection, rather than behaving like a
        # proper cookie jar. Unfortunately, since session cookies do not
        # contain expirations, they can't be stored in a proper cookie jar.
        if os.path.isfile(session_cookie_jar):
            with salt.utils.files.fopen(session_cookie_jar, 'rb') as fh_:
                session_cookies = msgpack.load(fh_)
            if isinstance(session_cookies, dict):
                header_dict.update(session_cookies)
        else:
            with salt.utils.files.fopen(session_cookie_jar, 'wb') as fh_:
                msgpack.dump('', fh_)

    for header in header_list:
        comps = header.split(':')
        if len(comps) < 2:
            continue
        header_dict[comps[0].strip()] = comps[1].strip()

    if not auth:
        if username and password:
            auth = (username, password)

    if agent == USERAGENT:
        agent = '{0} http.query()'.format(agent)
    header_dict['User-agent'] = agent

    if backend == 'requests':
        sess = requests.Session()
        sess.auth = auth
        sess.headers.update(header_dict)
        log.trace('Request Headers: %s', sess.headers)
        sess_cookies = sess.cookies
        sess.verify = verify_ssl
    elif backend == 'urllib2':
        sess_cookies = None
    else:
        # Tornado
        sess_cookies = None

    if cookies is not None:
        if cookie_format == 'mozilla':
            sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar)
        else:
            sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar)
        if not os.path.isfile(cookie_jar):
            sess_cookies.save()
        sess_cookies.load()

    if test is True:
        if test_url is None:
            return {}
        else:
            url = test_url
            ret['test'] = True

    if backend == 'requests':
        req_kwargs = {}
        if stream is True:
            if requests.__version__[0] == '0':
                # 'stream' was called 'prefetch' before 1.0, with flipped meaning
                req_kwargs['prefetch'] = False
            else:
                req_kwargs['stream'] = True

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['cert'] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['cert'] = cert
            else:
                log.error('The client-side certificate path that'
                          ' was passed is not valid: %s', cert)

        result = sess.request(
            method, url, params=params, data=data, **req_kwargs
        )
        result.raise_for_status()
        if stream is True:
            # fake a HTTP response header
            header_callback('HTTP/1.0 {0} MESSAGE'.format(result.status_code))
            # fake streaming the content
            streaming_callback(result.content)
            return {
                'handle': result,
            }

        if handle is True:
            return {
                'handle': result,
                'body': result.content,
            }

        log.debug('Final URL location of Response: %s',
                  sanitize_url(result.url, hide_fields))

        result_status_code = result.status_code
        result_headers = result.headers
        result_text = result.content
        result_cookies = result.cookies
        body = result.content
        if not isinstance(body, six.text_type):
            body = body.decode(result.encoding or 'utf-8')
        ret['body'] = body
    elif backend == 'urllib2':
        request = urllib_request.Request(url_full, data)
        handlers = [
            urllib_request.HTTPHandler,
            urllib_request.HTTPCookieProcessor(sess_cookies)
        ]

        if url.startswith('https'):
            hostname = request.get_host()
            handlers[0] = urllib_request.HTTPSHandler(1)
            if not HAS_MATCHHOSTNAME:
                log.warning('match_hostname() not available, SSL hostname checking '
                            'not available. THIS CONNECTION MAY NOT BE SECURE!')
            elif verify_ssl is False:
                log.warning('SSL certificate verification has been explicitly '
                            'disabled. THIS CONNECTION MAY NOT BE SECURE!')
            else:
                if ':' in hostname:
                    hostname, port = hostname.split(':')
                else:
                    port = 443
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((hostname, int(port)))
                sockwrap = ssl.wrap_socket(
                    sock,
                    ca_certs=ca_bundle,
                    cert_reqs=ssl.CERT_REQUIRED
                )
                try:
                    match_hostname(sockwrap.getpeercert(), hostname)
                except CertificateError as exc:
                    ret['error'] = (
                        'The certificate was invalid. '
                        'Error returned was: %s',
                        pprint.pformat(exc)
                        )
                    return ret

                # Client-side cert handling
                if cert is not None:
                    cert_chain = None
                    if isinstance(cert, six.string_types):
                        if os.path.exists(cert):
                            cert_chain = (cert)
                    elif isinstance(cert, list):
                        if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                            cert_chain = cert
                    else:
                        log.error('The client-side certificate path that was '
                                  'passed is not valid: %s', cert)
                        return
                    if hasattr(ssl, 'SSLContext'):
                        # Python >= 2.7.9
                        context = ssl.SSLContext.load_cert_chain(*cert_chain)
                        handlers.append(urllib_request.HTTPSHandler(context=context))  # pylint: disable=E1123
                    else:
                        # Python < 2.7.9
                        cert_kwargs = {
                            'host': request.get_host(),
                            'port': port,
                            'cert_file': cert_chain[0]
                        }
                        if len(cert_chain) > 1:
                            cert_kwargs['key_file'] = cert_chain[1]
                        handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)

        opener = urllib_request.build_opener(*handlers)
        for header in header_dict:
            request.add_header(header, header_dict[header])
        request.get_method = lambda: method
        try:
            result = opener.open(request)
        except URLError as exc:
            return {'Error': six.text_type(exc)}
        if stream is True or handle is True:
            return {
                'handle': result,
                'body': result.content,
            }

        result_status_code = result.code
        result_headers = dict(result.info())
        result_text = result.read()
        if 'Content-Type' in result_headers:
            res_content_type, res_params = cgi.parse_header(result_headers['Content-Type'])
            if res_content_type.startswith('text/') and \
                    'charset' in res_params and \
                    not isinstance(result_text, six.text_type):
                result_text = result_text.decode(res_params['charset'])
        if six.PY3 and isinstance(result_text, bytes):
            result_text = result_text.decode('utf-8')
        ret['body'] = result_text
    else:
        # Tornado
        req_kwargs = {}

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['client_cert'] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['client_cert'] = cert[0]
                    req_kwargs['client_key'] = cert[1]
            else:
                log.error('The client-side certificate path that '
                          'was passed is not valid: %s', cert)

        if isinstance(data, dict):
            data = _urlencode(data)

        if verify_ssl:
            req_kwargs['ca_certs'] = ca_bundle

        max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body'])
        connect_timeout = opts.get('http_connect_timeout', salt.config.DEFAULT_MINION_OPTS['http_connect_timeout'])
        timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout'])

        client_argspec = None

        proxy_host = opts.get('proxy_host', None)
        if proxy_host:
            # tornado requires a str for proxy_host, cannot be a unicode str in py2
            proxy_host = salt.utils.stringutils.to_str(proxy_host)
        proxy_port = opts.get('proxy_port', None)
        proxy_username = opts.get('proxy_username', None)
        if proxy_username:
            # tornado requires a str, cannot be unicode str in py2
            proxy_username = salt.utils.stringutils.to_str(proxy_username)
        proxy_password = opts.get('proxy_password', None)
        if proxy_password:
            # tornado requires a str, cannot be unicode str in py2
            proxy_password = salt.utils.stringutils.to_str(proxy_password)
        no_proxy = opts.get('no_proxy', [])

        # Since tornado doesn't support no_proxy, we'll always hand it empty proxies or valid ones
        # except we remove the valid ones if a url has a no_proxy hostname in it
        if urlparse(url_full).hostname in no_proxy:
            proxy_host = None
            proxy_port = None

        # We want to use curl_http if we have a proxy defined
        if proxy_host and proxy_port:
            if HAS_CURL_HTTPCLIENT is False:
                ret['error'] = ('proxy_host and proxy_port has been set. This requires pycurl and tornado, '
                                'but the libraries does not seem to be installed')
                log.error(ret['error'])
                return ret

            tornado.httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
            client_argspec = salt.utils.args.get_function_argspec(
                    tornado.curl_httpclient.CurlAsyncHTTPClient.initialize)
        else:
            client_argspec = salt.utils.args.get_function_argspec(
                    tornado.simple_httpclient.SimpleAsyncHTTPClient.initialize)

        supports_max_body_size = 'max_body_size' in client_argspec.args

        req_kwargs.update({
            'method': method,
            'headers': header_dict,
            'auth_username': username,
            'auth_password': password,
            'body': data,
            'validate_cert': verify_ssl,
            'allow_nonstandard_methods': True,
            'streaming_callback': streaming_callback,
            'header_callback': header_callback,
            'connect_timeout': connect_timeout,
            'request_timeout': timeout,
            'proxy_host': proxy_host,
            'proxy_port': proxy_port,
            'proxy_username': proxy_username,
            'proxy_password': proxy_password,
            'raise_error': raise_error,
            'decompress_response': False,
        })

        # Unicode types will cause a TypeError when Tornado's curl HTTPClient
        # invokes setopt. Therefore, make sure all arguments we pass which
        # contain strings are str types.
        req_kwargs = salt.utils.data.decode(req_kwargs, to_str=True)

        try:
            download_client = HTTPClient(max_body_size=max_body) \
                if supports_max_body_size \
                else HTTPClient()
            result = download_client.fetch(url_full, **req_kwargs)
        except tornado.httpclient.HTTPError as exc:
            ret['status'] = exc.code
            ret['error'] = six.text_type(exc)
            return ret
        except socket.gaierror as exc:
            if status is True:
                ret['status'] = 0
            ret['error'] = six.text_type(exc)
            return ret

        if stream is True or handle is True:
            return {
                'handle': result,
                'body': result.body,
            }

        result_status_code = result.code
        result_headers = result.headers
        result_text = result.body
        if 'Content-Type' in result_headers:
            res_content_type, res_params = cgi.parse_header(result_headers['Content-Type'])
            if res_content_type.startswith('text/') and \
                    'charset' in res_params and \
                    not isinstance(result_text, six.text_type):
                result_text = result_text.decode(res_params['charset'])
        if six.PY3 and isinstance(result_text, bytes):
            result_text = result_text.decode('utf-8')
        ret['body'] = result_text
        if 'Set-Cookie' in result_headers and cookies is not None:
            result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
            for item in result_cookies:
                sess_cookies.set_cookie(item)
        else:
            result_cookies = None

    if isinstance(result_headers, list):
        result_headers_dict = {}
        for header in result_headers:
            comps = header.split(':')
            result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
        result_headers = result_headers_dict

    log.debug('Response Status Code: %s', result_status_code)
    log.trace('Response Headers: %s', result_headers)
    log.trace('Response Cookies: %s', sess_cookies)
    # log.trace("Content: %s", result_text)

    coding = result_headers.get('Content-Encoding', "identity")

    # Requests will always decompress the content, and working around that is annoying.
    if backend != 'requests':
        result_text = __decompressContent(coding, result_text)

    try:
        log.trace('Response Text: %s', result_text)
    except UnicodeEncodeError as exc:
        log.trace('Cannot Trace Log Response Text: %s. This may be due to '
                  'incompatibilities between requests and logging.', exc)

    if text_out is not None:
        with salt.utils.files.fopen(text_out, 'w') as tof:
            tof.write(result_text)

    if headers_out is not None and os.path.exists(headers_out):
        with salt.utils.files.fopen(headers_out, 'w') as hof:
            hof.write(result_headers)

    if cookies is not None:
        sess_cookies.save()

    if persist_session is True and HAS_MSGPACK:
        # TODO: See persist_session above
        if 'set-cookie' in result_headers:
            with salt.utils.files.fopen(session_cookie_jar, 'wb') as fh_:
                session_cookies = result_headers.get('set-cookie', None)
                if session_cookies is not None:
                    msgpack.dump({'Cookie': session_cookies}, fh_)
                else:
                    msgpack.dump('', fh_)

    if status is True:
        ret['status'] = result_status_code

    if headers is True:
        ret['headers'] = result_headers

    if decode is True:
        if decode_type == 'auto':
            content_type = result_headers.get(
                'content-type', 'application/json'
            )
            if 'xml' in content_type:
                decode_type = 'xml'
            elif 'json' in content_type:
                decode_type = 'json'
            elif 'yaml' in content_type:
                decode_type = 'yaml'
            else:
                decode_type = 'plain'

        valid_decodes = ('json', 'xml', 'yaml', 'plain')
        if decode_type not in valid_decodes:
            ret['error'] = (
                'Invalid decode_type specified. '
                'Valid decode types are: {0}'.format(
                    pprint.pformat(valid_decodes)
                )
            )
            log.error(ret['error'])
            return ret

        if decode_type == 'json':
            ret['dict'] = salt.utils.json.loads(result_text)
        elif decode_type == 'xml':
            ret['dict'] = []
            items = ET.fromstring(result_text)
            for item in items:
                ret['dict'].append(xml.to_dict(item))
        elif decode_type == 'yaml':
            ret['dict'] = salt.utils.data.decode(salt.utils.yaml.safe_load(result_text))
        else:
            text = True

        if decode_out:
            with salt.utils.files.fopen(decode_out, 'w') as dof:
                dof.write(result_text)

    if text is True:
        ret['text'] = result_text

    return ret
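
For the decode_type='xml' branch above, each top-level child of the parsed body becomes one entry in ret['dict']. Salt converts every child with its xml.to_dict helper; the sketch below substitutes a plain tag/text mapping:

import xml.etree.ElementTree as ET

result_text = '<hosts><host><name>web01</name></host><host><name>web02</name></host></hosts>'
ret = {'dict': []}
for item in ET.fromstring(result_text):
    ret['dict'].append({child.tag: child.text for child in item})
print(ret)   # {'dict': [{'name': 'web01'}, {'name': 'web02'}]}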
Example #27
File: aws.py Project: DaveQB/salt
def query(params=None, setname=None, requesturl=None, location=None,
          return_url=False, return_root=False, opts=None, provider=None,
          endpoint=None, product='ec2', sigver='2'):
    '''
    Perform a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html

    Regions and endpoints are documented at:

    http://docs.aws.amazon.com/general/latest/gr/rande.html

    Default ``product`` is ``ec2``. Valid ``product`` names are:

    .. code-block: yaml

        - autoscaling (Auto Scaling)
        - cloudformation (CloudFormation)
        - ec2 (Elastic Compute Cloud)
        - elasticache (ElastiCache)
        - elasticbeanstalk (Elastic BeanStalk)
        - elasticloadbalancing (Elastic Load Balancing)
        - elasticmapreduce (Elastic MapReduce)
        - iam (Identity and Access Management)
        - importexport (Import/Export)
        - monitoring (CloudWatch)
        - rds (Relational Database Service)
        - simpledb (SimpleDB)
        - sns (Simple Notification Service)
        - sqs (Simple Queue Service)
    '''
    if params is None:
        params = {}

    if opts is None:
        opts = {}

    function = opts.get('function', (None, product))
    providers = opts.get('providers', {})

    if provider is None:
        prov_dict = providers.get(function[1], {}).get(product, {})
        if prov_dict:
            driver = list(list(prov_dict.keys()))[0]
            provider = providers.get(driver, product)
    else:
        prov_dict = providers.get(provider, {}).get(product, {})

    service_url = prov_dict.get('service_url', 'amazonaws.com')

    if not location:
        location = get_location(opts, provider)

    if endpoint is None:
        if not requesturl:
            endpoint = prov_dict.get(
                'endpoint',
                '{0}.{1}.{2}'.format(product, location, service_url)
            )

            requesturl = 'https://{0}/'.format(endpoint)
        else:
            endpoint = urlparse(requesturl).netloc
            if endpoint == '':
                endpoint_err = ('Could not find a valid endpoint in the '
                                'requesturl: {0}. Looking for something '
                                'like https://some.aws.endpoint/?args').format(
                                    requesturl
                                )
                LOG.error(endpoint_err)
                if return_url is True:
                    return {'error': endpoint_err}, requesturl
                return {'error': endpoint_err}

    LOG.debug('Using AWS endpoint: {0}'.format(endpoint))
    method = 'GET'

    aws_api_version = prov_dict.get(
        'aws_api_version', prov_dict.get(
            '{0}_api_version'.format(product),
            DEFAULT_AWS_API_VERSION
        )
    )

    if sigver == '4':
        headers, requesturl = sig4(
            method, endpoint, params, prov_dict, aws_api_version, location, product, requesturl=requesturl
        )
        params_with_headers = {}
    else:
        params_with_headers = sig2(
            method, endpoint, params, prov_dict, aws_api_version
        )
        headers = {}

    attempts = 5
    while attempts > 0:
        LOG.debug('AWS Request: {0}'.format(requesturl))
        LOG.trace('AWS Request Parameters: {0}'.format(params_with_headers))
        try:
            result = requests.get(requesturl, headers=headers, params=params_with_headers)
            LOG.debug(
                'AWS Response Status Code: {0}'.format(
                    result.status_code
                )
            )
            LOG.trace(
                'AWS Response Text: {0}'.format(
                    result.text
                )
            )
            result.raise_for_status()
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = xml.to_dict(root)

            # check to see if we should retry the query
            err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
            if attempts > 0 and err_code and err_code in AWS_RETRY_CODES:
                attempts -= 1
                LOG.error(
                    'AWS Response Status Code and Error: [{0} {1}] {2}; '
                    'Attempts remaining: {3}'.format(
                        exc.response.status_code, exc, data, attempts
                    )
                )
                # Wait a bit before continuing to prevent throttling
                time.sleep(2)
                continue

            LOG.error(
                'AWS Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data
                )
            )
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}
    else:
        LOG.error(
            'AWS Response Status Code and Error: [{0} {1}] {2}'.format(
                exc.response.status_code, exc, data
            )
        )
        if return_url is True:
            return {'error': data}, requesturl
        return {'error': data}

    response = result.text

    root = ET.fromstring(response)
    items = root[1]
    if return_root is True:
        items = root

    if setname:
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)

        for item in range(0, children_len):
            comps = root[item].tag.split('}')
            if comps[1] == setname:
                items = root[item]

    ret = []
    for item in items:
        ret.append(xml.to_dict(item))

    if return_url is True:
        return ret, requesturl

    return ret
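
The retry check reads the error code out of the parsed error body. The sketch below uses a trimmed EC2-style <Response> payload (made up for illustration) and plain ElementTree path lookups instead of Salt's xml.to_dict:

import xml.etree.ElementTree as ET

error_body = '''<Response>
    <Errors>
        <Error>
            <Code>RequestLimitExceeded</Code>
            <Message>Request limit exceeded.</Message>
        </Error>
    </Errors>
    <RequestID>12345678-aaaa-bbbb-cccc-123456789012</RequestID>
</Response>'''

root = ET.fromstring(error_body)
print(root.find('Errors/Error/Code').text)   # RequestLimitExceeded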
Example #28
    def test_xml_case_f_full(self):
        xmldata = ET.fromstring(self.cases['f']['xml'])
        defaultdict = xml.to_dict(xmldata, True)
        self.assertEqual(defaultdict, self.cases['f']['full'])
Example #29
def create_node(vm_):
    '''
    Build and submit the XML to create a node
    '''
    # Start the tree
    content = ET.Element('ve')

    # Name of the instance
    name = ET.SubElement(content, 'name')
    name.text = vm_['name']

    # Description, defaults to name
    desc = ET.SubElement(content, 'description')
    desc.text = config.get_cloud_config_value('desc',
                                              vm_,
                                              __opts__,
                                              default=vm_['name'],
                                              search_global=False)

    # How many CPU cores, and how fast they are
    cpu = ET.SubElement(content, 'cpu')
    cpu.attrib['number'] = config.get_cloud_config_value('cpu_number',
                                                         vm_,
                                                         __opts__,
                                                         default='1',
                                                         search_global=False)
    cpu.attrib['power'] = config.get_cloud_config_value('cpu_power',
                                                        vm_,
                                                        __opts__,
                                                        default='1000',
                                                        search_global=False)

    # How many megabytes of RAM
    ram = ET.SubElement(content, 'ram-size')
    ram.text = config.get_cloud_config_value('ram',
                                             vm_,
                                             __opts__,
                                             default='256',
                                             search_global=False)

    # Bandwidth available, in kbps
    bandwidth = ET.SubElement(content, 'bandwidth')
    bandwidth.text = config.get_cloud_config_value('bandwidth',
                                                   vm_,
                                                   __opts__,
                                                   default='100',
                                                   search_global=False)

    # How many public IPs will be assigned to this instance
    ip_num = ET.SubElement(content, 'no-of-public-ip')
    ip_num.text = config.get_cloud_config_value('ip_num',
                                                vm_,
                                                __opts__,
                                                default='1',
                                                search_global=False)

    # Size of the instance disk
    disk = ET.SubElement(content, 've-disk')
    disk.attrib['local'] = 'true'
    disk.attrib['size'] = config.get_cloud_config_value('disk_size',
                                                        vm_,
                                                        __opts__,
                                                        default='10',
                                                        search_global=False)

    # Attributes for the image
    vm_image = config.get_cloud_config_value('image',
                                             vm_,
                                             __opts__,
                                             search_global=False)
    image = show_image({'image': vm_image}, call='function')
    platform = ET.SubElement(content, 'platform')
    template = ET.SubElement(platform, 'template-info')
    template.attrib['name'] = vm_image
    os_info = ET.SubElement(platform, 'os-info')
    os_info.attrib['technology'] = image[vm_image]['technology']
    os_info.attrib['type'] = image[vm_image]['osType']

    # Username and password
    admin = ET.SubElement(content, 'admin')
    admin.attrib['login'] = config.get_cloud_config_value('ssh_username',
                                                          vm_,
                                                          __opts__,
                                                          default='root')
    admin.attrib['password'] = config.get_cloud_config_value(
        'password', vm_, __opts__, search_global=False)

    data = ET.tostring(content, encoding='UTF-8')

    salt.utils.cloud.fire_event('event',
                                'requesting instance',
                                'salt/cloud/{0}/requesting'.format(
                                    vm_['name']), {'kwargs': data},
                                transport=__opts__['transport'])

    node = query(action='ve', method='POST', data=data)
    return node
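For reference, a stripped-down sketch of the <ve> document this function assembles, with placeholder literals in place of the cloud-profile lookups.

import xml.etree.ElementTree as ET

content = ET.Element('ve')
ET.SubElement(content, 'name').text = 'myinstance'          # placeholder
ET.SubElement(content, 'description').text = 'myinstance'
ET.SubElement(content, 'cpu', number='1', power='1000')
ET.SubElement(content, 'ram-size').text = '256'
ET.SubElement(content, 've-disk', local='true', size='10')

data = ET.tostring(content, encoding='UTF-8')
# b"<?xml version='1.0' encoding='UTF-8'?><ve><name>myinstance</name>..."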
Example #30
0
File: parallels.py Project: DaveQB/salt
def query(action=None, command=None, args=None, method='GET', data=None):
    '''
    Make a web call to a Parallels provider
    '''
    path = config.get_cloud_config_value(
        'url', get_configured_provider(), __opts__, search_global=False
    )
    auth_handler = _HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm='Parallels Instance Manager',
        uri=path,
        user=config.get_cloud_config_value(
            'user', get_configured_provider(), __opts__, search_global=False
        ),
        passwd=config.get_cloud_config_value(
            'password', get_configured_provider(), __opts__,
            search_global=False
        )
    )
    opener = _build_opener(auth_handler)
    _install_opener(opener)

    if action:
        path += action

    if command:
        path += '/{0}'.format(command)

    if not isinstance(args, dict):
        args = {}

    kwargs = {'data': data}
    if isinstance(data, str) and '<?xml' in data:
        kwargs['headers'] = {
            'Content-type': 'application/xml',
        }

    if args:
        params = _urlencode(args)
        req = _Request(url='{0}?{1}'.format(path, params), **kwargs)
    else:
        req = _Request(url=path, **kwargs)

    req.get_method = lambda: method

    log.debug('{0} {1}'.format(method, req.get_full_url()))
    if data:
        log.debug(data)

    try:
        result = _urlopen(req)
        log.debug(
            'PARALLELS Response Status Code: {0}'.format(
                result.getcode()
            )
        )

        if 'content-length' in result.headers:
            content = result.read()
            result.close()
            items = ET.fromstring(content)
            return items

        return {}
    except URLError as exc:
        log.error(
            'PARALLELS Response Status Code: {0} {1}'.format(
                exc.code,
                exc.msg
            )
        )
        root = ET.fromstring(exc.read())
        log.error(root)
        return {'error': root}
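The underscore-prefixed names above are presumably six.moves aliases for the standard urllib helpers; a rough Python 3 equivalent of the auth setup, with a placeholder endpoint and credentials, looks like this.

import urllib.request

path = 'https://pim.example.com:4465/'     # placeholder endpoint
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(
    realm='Parallels Instance Manager',
    uri=path,
    user='admin',          # placeholder
    passwd='secret',       # placeholder
)
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)

# Request(method=...) replaces the get_method lambda trick used above
req = urllib.request.Request(url=path + 've', method='GET')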
Example #31
0
def test_update_clock(make_mock_vm):
    """
    test virt.update with clock parameter
    """
    xml_def = """
        <domain type="kvm">
          <name>my_vm</name>
          <memory unit='KiB'>524288</memory>
          <currentMemory unit='KiB'>524288</currentMemory>
          <vcpu placement='static'>1</vcpu>
          <os>
            <type arch='x86_64'>linux</type>
            <kernel>/usr/lib/grub2/x86_64-xen/grub.xen</kernel>
          </os>
          <clock offset="localtime" adjustment="-3600">
            <timer name="tsc" frequency="3504000000" mode="native" />
            <timer name="kvmclock" present="no" />
          </clock>
          <on_reboot>restart</on_reboot>
        </domain>
    """
    domain_mock = make_mock_vm(xml_def)

    # Update with no change to the features
    ret = virt.update(
        "my_vm",
        clock={
            "utc": False,
            "adjustment": -3600,
            "timers": {
                "tsc": {
                    "frequency": 3504000000,
                    "mode": "native"
                },
                "kvmclock": {
                    "present": False
                },
            },
        },
    )
    assert not ret["definition"]

    # Update
    ret = virt.update(
        "my_vm",
        clock={
            "timezone": "CEST",
            "timers": {
                "rtc": {
                    "track": "wall",
                    "tickpolicy": "catchup",
                    "slew": 4636,
                    "threshold": 123,
                    "limit": 2342,
                },
                "hpet": {
                    "present": True
                },
            },
        },
    )
    assert ret["definition"]
    setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
    assert "timezone" == setxml.find("clock").get("offset")
    assert "CEST" == setxml.find("clock").get("timezone")
    assert {"rtc",
            "hpet"} == {t.get("name")
                        for t in setxml.findall("clock/timer")}
    assert "catchup" == setxml.find("clock/timer[@name='rtc']").get(
        "tickpolicy")
    assert "wall" == setxml.find("clock/timer[@name='rtc']").get("track")
    assert {
        "slew": "4636",
        "threshold": "123",
        "limit": "2342"
    } == setxml.find("clock/timer[@name='rtc']/catchup").attrib
    assert "yes" == setxml.find("clock/timer[@name='hpet']").get("present")

    # Revert to UTC
    ret = virt.update("my_vm",
                      clock={
                          "utc": True,
                          "adjustment": None,
                          "timers": None
                      })
    assert ret["definition"]
    setxml = ET.fromstring(virt.libvirt.openAuth().defineXML.call_args[0][0])
    assert {"offset": "utc"} == setxml.find("clock").attrib
    assert setxml.find("clock/timer") is None
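The assertions above lean on ElementTree's limited XPath support; the same lookups against a hand-written clock element:

import xml.etree.ElementTree as ET

setxml = ET.fromstring("""
<domain>
  <clock offset="timezone" timezone="CEST">
    <timer name="rtc" track="wall" tickpolicy="catchup">
      <catchup slew="4636" threshold="123" limit="2342"/>
    </timer>
    <timer name="hpet" present="yes"/>
  </clock>
</domain>
""")

print(setxml.find("clock").get("timezone"))                    # CEST
print({t.get("name") for t in setxml.findall("clock/timer")})  # {'rtc', 'hpet'}
print(setxml.find("clock/timer[@name='rtc']/catchup").attrib)
# {'slew': '4636', 'threshold': '123', 'limit': '2342'}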
Example #32
0
File: bamboohr.py Project: DaveQB/salt
def show_employee(emp_id, fields=None):
    '''
    Show a single employee for this company, by employee ID.

    CLI Example:

        salt myminion bamboohr.show_employee 1138

    By default, the fields normally returned from bamboohr.list_employees are
    returned. These fields are:

        - canUploadPhoto
        - department
        - displayName
        - firstName
        - id
        - jobTitle
        - lastName
        - location
        - mobilePhone
        - nickname
        - photoUploaded
        - photoUrl
        - workEmail
        - workPhone
        - workPhoneExtension

    If needed, a different set of fields may be specified, separated by commas:

    CLI Example:

        salt myminion bamboohr.show_employee 1138 displayName,dateOfBirth

    A list of available fields can be found at
    http://www.bamboohr.com/api/documentation/employees.php
    '''
    ret = {}
    if fields is None:
        fields = ','.join((
            'canUploadPhoto',
            'department',
            'displayName',
            'firstName',
            'id',
            'jobTitle',
            'lastName',
            'location',
            'mobilePhone',
            'nickname',
            'photoUploaded',
            'photoUrl',
            'workEmail',
            'workPhone',
            'workPhoneExtension',
        ))

    status, result = _query(
        action='employees',
        command=emp_id,
        args={'fields': fields}
    )

    root = ET.fromstring(result)
    items = root.getchildren()

    ret = {'id': emp_id}
    for item in items:
        ret[item.items()[0][1]] = item.text
    return ret
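A standalone sketch of the parsing loop above, against a hand-written BambooHR-style field list; like the loop, it assumes the field id is the first attribute on each child element.

import xml.etree.ElementTree as ET

result = """
<employee id="1138">
  <field id="displayName">Marvin Android</field>
  <field id="jobTitle">Sanitation Engineer</field>
</employee>
"""

root = ET.fromstring(result)
ret = {'id': '1138'}
for item in root:   # equivalent to root.getchildren(), which is deprecated
    ret[item.items()[0][1]] = item.text
# ret == {'id': '1138', 'displayName': 'Marvin Android',
#         'jobTitle': 'Sanitation Engineer'}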
Example #33
0
File: s3.py Project: shineforever/ops
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, location=None, full_headers=False):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    SSL verification may also be turned off in the configuration:

    s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    '''
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if bucket:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    else:
        endpoint = service_url

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key or not keyid:
        key = salt.utils.aws.IROLE_CODE
        keyid = salt.utils.aws.IROLE_CODE

    if not location:
        location = iam.get_iam_region()
    if not location:
        location = DEFAULT_LOCATION

    data = ''
    if method == 'PUT':
        if local_file:
            with salt.utils.fopen(local_file, 'r') as ifile:
                data = ifile.read()

    if not requesturl:
        requesturl = 'https://{0}/{1}'.format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={'id': keyid, 'key': key},
            location=location,
            product='s3',
            requesturl=requesturl,
        )

    log.debug('S3 Request: {0}'.format(requesturl))
    log.debug('S3 Headers::')
    log.debug('    Authorization: {0}'.format(headers['Authorization']))

    if not data:
        data = None

    try:
        result = requests.request(method, requesturl, headers=headers,
                                  data=data,
                                  verify=verify_ssl)
        response = result.content
    except requests.exceptions.HTTPError as exc:
        log.error('There was an error::')
        if hasattr(exc, 'code') and hasattr(exc, 'msg'):
            log.error('    Code: {0}: {1}'.format(exc.code, exc.msg))
        log.error('    Content: \n{0}'.format(exc.read()))
        return False

    log.debug('S3 Response Status Code: {0}'.format(result.status_code))

    if method == 'PUT':
        if result.status_code == 200:
            if local_file:
                log.debug('Uploaded from {0} to {1}'.format(local_file, path))
            else:
                log.debug('Created bucket {0}'.format(bucket))
        else:
            if local_file:
                log.debug('Failed to upload from {0} to {1}: {2}'.format(
                                                    local_file,
                                                    path,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to create bucket {0}'.format(bucket))
        return

    if method == 'DELETE':
        if str(result.status_code).startswith('2'):
            if path:
                log.debug('Deleted {0} from bucket {1}'.format(path, bucket))
            else:
                log.debug('Deleted bucket {0}'.format(bucket))
        else:
            if path:
                log.debug('Failed to delete {0} from bucket {1}: {2}'.format(
                                                    path,
                                                    bucket,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to delete bucket {0}'.format(bucket))
        return

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        log.debug('Saving to local file: {0}'.format(local_file))
        with salt.utils.fopen(local_file, 'w') as out:
            out.write(response)
        return 'Saved to local file: {0}'.format(local_file)

    # This can be used to return a binary object wholesale
    if return_bin:
        return response

    if response:
        items = ET.fromstring(response)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
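A hypothetical call to the function above, using the placeholder key pair from the docstring; an empty path lists the bucket contents as a list of dicts.

ret = query(
    key='askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs',   # placeholder secret key
    keyid='GKTADJGHEIQSXMKKRBJ08H',                    # placeholder key id
    method='GET',
    bucket='my-bucket',
    path='',
)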
Example #34
0
File: s3.py Project: mjura/salt-1
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, location=None, full_headers=False):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    SSL verification may also be turned off in the configuration:

    s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    '''
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if bucket:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    else:
        endpoint = service_url

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key or not keyid:
        key = salt.utils.aws.IROLE_CODE
        keyid = salt.utils.aws.IROLE_CODE

    data = ''
    if method == 'PUT':
        if local_file:
            with salt.utils.fopen(local_file, 'r') as ifile:
                data = ifile.read()

    if not requesturl:
        requesturl = 'https://{0}/{1}'.format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={'id': keyid, 'key': key},
            location=location,
            product='s3',
            requesturl=requesturl,
        )

    log.debug('S3 Request: {0}'.format(requesturl))
    log.debug('S3 Headers::')
    log.debug('    Authorization: {0}'.format(headers['Authorization']))

    if not data:
        data = None

    try:
        result = requests.request(method, requesturl, headers=headers,
                                  data=data,
                                  verify=verify_ssl)
        response = result.content
    except requests.exceptions.HTTPError as exc:
        log.error('There was an error::')
        if hasattr(exc, 'code') and hasattr(exc, 'msg'):
            log.error('    Code: {0}: {1}'.format(exc.code, exc.msg))
        log.error('    Content: \n{0}'.format(exc.read()))
        return False

    log.debug('S3 Response Status Code: {0}'.format(result.status_code))

    if method == 'PUT':
        if result.status_code == 200:
            if local_file:
                log.debug('Uploaded from {0} to {1}'.format(local_file, path))
            else:
                log.debug('Created bucket {0}'.format(bucket))
        else:
            if local_file:
                log.debug('Failed to upload from {0} to {1}: {2}'.format(
                                                    local_file,
                                                    path,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to create bucket {0}'.format(bucket))
        return

    if method == 'DELETE':
        if str(result.status_code).startswith('2'):
            if path:
                log.debug('Deleted {0} from bucket {1}'.format(path, bucket))
            else:
                log.debug('Deleted bucket {0}'.format(bucket))
        else:
            if path:
                log.debug('Failed to delete {0} from bucket {1}: {2}'.format(
                                                    path,
                                                    bucket,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to delete bucket {0}'.format(bucket))
        return

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        log.debug('Saving to local file: {0}'.format(local_file))
        with salt.utils.fopen(local_file, 'w') as out:
            out.write(response)
        return 'Saved to local file: {0}'.format(local_file)

    # This can be used to return a binary object wholesale
    if return_bin:
        return response

    if response:
        items = ET.fromstring(response)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
Example #35
0
File: s3.py Project: swipswaps/hubble
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, full_headers=False, kms_keyid=None,
          location=None, role_arn=None, chunk_size=16384, path_style=False,
          https_enable=True):
    """
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use salt with a proxy for an s3 compatible storage

    You can use either https protocol or http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    """
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = '{0}.{1}'.format(bucket, service_url)

    if path_style and bucket:
        path = '{0}/{1}'.format(bucket, path)

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE

    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ('PUT', 'POST'):
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid

    if not location:
        location = salt.utils.aws.get_location()

    data = ''
    fh = None
    payload_hash = None
    if method == 'PUT':
        if local_file:
            payload_hash = salt.utils.hashutils.get_hash(local_file, form='sha256')

    if path is None:
        path = ''
    path = _quote(path)

    if not requesturl:
        requesturl = (('https' if https_enable else 'http')+'://{0}/{1}').format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={'id': keyid, 'key': key},
            role_arn=role_arn,
            location=location,
            product='s3',
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug('S3 Request: %s', requesturl)
    log.debug('S3 Headers::')
    log.debug('    Authorization: %s', headers['Authorization'])

    if not data:
        data = None

    try:
        if method == 'PUT':
            if local_file:
                fh = salt.utils.files.fopen(local_file, 'rb')  # pylint: disable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        elif method == 'GET' and local_file and not return_bin:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        else:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      timeout=300)
    finally:
        if fh is not None:
            fh.close()

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or 'Unknown error'
        log.debug('    Response content: %s', err_text)

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data['Code']
            err_msg = err_data['Message']
        except (KeyError, ET.ParseError) as err:
            log.debug(
                'Failed to parse s3 err response. %s: %s',
                type(err).__name__, err
            )
            err_code = 'http-{0}'.format(result.status_code)
            err_msg = err_text

    if os.environ.get('MOCK_SLOW_DOWN'):
        result.status_code = 503
        err_code = 'SlowDown'
        err_msg = 'MOCK_SLOW_DOWN environment variable set. All S3 queries will fail for testing purposes.'

    log.debug('S3 Response Status Code: %s', result.status_code)

    if method == 'PUT':
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    'Failed to upload from {0} to {1}. {2}: {3}'.format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to create bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if local_file:
            log.debug('Uploaded from %s to %s', local_file, path)
        else:
            log.debug('Created bucket %s', bucket)
        return None

    if method == 'DELETE':
        if not six.text_type(result.status_code).startswith('2'):
            if path:
                raise CommandExecutionError(
                    'Failed to delete {0} from bucket {1}. {2}: {3}'.format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to delete bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if path:
            log.debug('Deleted %s from bucket %s', path, bucket)
        else:
            log.debug('Deleted bucket %s', bucket)
        return None

    sortof_ok = ['SlowDown', 'ServiceUnavailable', 'RequestTimeTooSkewed',
        'RequestTimeout', 'OperationAborted', 'InternalError',
        'AccessDenied']

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        if result.status_code < 200 or result.status_code >= 300:
            if err_code in sortof_ok:
                log.error('Failed to get file=%s. %s: %s', path, err_code, err_msg)
                return None
            raise CommandExecutionError(
                'Failed to get file={0}. {1}: {2}'.format(path, err_code, err_msg))

        log.debug('Saving to local file: %s', local_file)
        with salt.utils.files.fopen(local_file, 'wb') as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return 'Saved to local file: {0}'.format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        if err_code in sortof_ok:
            log.error('Failed s3 operation. %s: %s', err_code, err_msg)
            return None
        raise CommandExecutionError(
            'Failed s3 operation. {0}: {1}'.format(err_code, err_msg))

    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content

    if result.content:
        items = ET.fromstring(result.content)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return None
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
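The error branch above expects an S3-style error document; a self-contained sketch of that parsing, with a local _to_dict standing in for salt's xml.to_dict helper.

import xml.etree.ElementTree as ET

err_text = """
<Error>
  <Code>SlowDown</Code>
  <Message>Please reduce your request rate.</Message>
</Error>
"""

def _to_dict(elem):
    # Only handles this flat error document; salt's helper is more general.
    return {child.tag: child.text for child in elem}

err_data = _to_dict(ET.fromstring(err_text))
err_code, err_msg = err_data['Code'], err_data['Message']
# err_code == 'SlowDown', err_msg == 'Please reduce your request rate.'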
Example #36
0
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path=None, return_bin=False, action=None, local_file=None,
          verify_ssl=True):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    A service_url may also be specified in the configuration::

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    SSL verification may also be turned off in the configuration:

    s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.
    '''
    if not headers:
        headers = {}

    if not params:
        params = {}

    if path is None:
        path = ''

    if not service_url:
        service_url = 's3.amazonaws.com'

    if bucket:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    else:
        endpoint = service_url

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    token = None
    if not key or not keyid:
        iam_creds = iam.get_iam_metadata()
        key = iam_creds['secret_key']
        keyid = iam_creds['access_key']
        token = iam_creds['security_token']

    if not requesturl:
        x_amz_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        content_type = 'text/plain'
        if method == 'GET':
            if bucket:
                can_resource = '/{0}/{1}'.format(bucket, path)
            else:
                can_resource = '/'
        elif method == 'PUT' or method == 'HEAD' or method == 'DELETE':
            if path:
                can_resource = '/{0}/{1}'.format(bucket, path)
            else:
                can_resource = '/{0}/'.format(bucket)

        if action:
            can_resource += '?{0}'.format(action)

        log.debug('CanonicalizedResource: {0}'.format(can_resource))

        headers['Host'] = endpoint
        headers['Content-type'] = content_type
        headers['Date'] = x_amz_date
        if token:
            headers['x-amz-security-token'] = token

        string_to_sign = '{0}\n'.format(method)

        new_headers = []
        for header in sorted(headers):
            if header.lower().startswith('x-amz'):
                log.debug(header.lower())
                new_headers.append('{0}:{1}'.format(header.lower(),
                                                    headers[header]))
        can_headers = '\n'.join(new_headers)
        log.debug('CanonicalizedAmzHeaders: {0}'.format(can_headers))

        string_to_sign += '\n{0}'.format(content_type)
        string_to_sign += '\n{0}'.format(x_amz_date)
        if can_headers:
            string_to_sign += '\n{0}'.format(can_headers)
        string_to_sign += '\n{0}'.format(can_resource)
        log.debug('String To Sign:: \n{0}'.format(string_to_sign))

        hashed = hmac.new(key, string_to_sign, hashlib.sha1)
        sig = binascii.b2a_base64(hashed.digest())
        headers['Authorization'] = 'AWS {0}:{1}'.format(keyid, sig.strip())

        querystring = urllib.urlencode(params)
        if action:
            if querystring:
                querystring = '{0}&{1}'.format(action, querystring)
            else:
                querystring = action
        requesturl = 'https://{0}/'.format(endpoint)
        if path:
            requesturl += path
        if querystring:
            requesturl += '?{0}'.format(querystring)

    if method == 'PUT':
        if local_file:
            with salt.utils.fopen(local_file, 'r') as ifile:
                data = ifile.read()

    log.debug('S3 Request: {0}'.format(requesturl))
    log.debug('S3 Headers::')
    log.debug('    Authorization: {0}'.format(headers['Authorization']))

    try:
        result = requests.request(method, requesturl, headers=headers,
                                  verify=verify_ssl)
        response = result.content
    except requests.exceptions.HTTPError as exc:
        log.error('There was an error::')
        if hasattr(exc, 'code') and hasattr(exc, 'msg'):
            log.error('    Code: {0}: {1}'.format(exc.code, exc.msg))
        log.error('    Content: \n{0}'.format(exc.read()))
        return False

    log.debug('S3 Response Status Code: {0}'.format(result.status_code))

    if method == 'PUT':
        if result.status_code == 200:
            if local_file:
                log.debug('Uploaded from {0} to {1}'.format(local_file, path))
            else:
                log.debug('Created bucket {0}'.format(bucket))
        else:
            if local_file:
                log.debug('Failed to upload from {0} to {1}: {2}'.format(
                                                    local_file,
                                                    path,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to create bucket {0}'.format(bucket))
        return

    if method == 'DELETE':
        if str(result.status_code).startswith('2'):
            if path:
                log.debug('Deleted {0} from bucket {1}'.format(path, bucket))
            else:
                log.debug('Deleted bucket {0}'.format(bucket))
        else:
            if path:
                log.debug('Failed to delete {0} from bucket {1}: {2}'.format(
                                                    path,
                                                    bucket,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to delete bucket {0}'.format(bucket))
        return

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        log.debug('Saving to local file: {0}'.format(local_file))
        with salt.utils.fopen(local_file, 'w') as out:
            out.write(response)
        return 'Saved to local file: {0}'.format(local_file)

    # This can be used to return a binary object wholesale
    if return_bin:
        return response

    if response:
        items = ET.fromstring(response)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if method == 'GET' or method == 'HEAD':
            return
        ret = {'headers': []}
        for header in result.headers:
            ret['headers'].append(header.strip())

    return ret
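A minimal sketch of the string-to-sign and HMAC-SHA1 signing assembled above, with placeholder credentials, date, and bucket; note that on Python 3 the hmac key and message must be bytes.

import binascii
import hashlib
import hmac

key = b'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'   # placeholder secret key
keyid = 'GKTADJGHEIQSXMKKRBJ08H'                     # placeholder key id

string_to_sign = (
    'GET\n'                                 # method
    '\n'                                    # blank line (empty Content-MD5 slot)
    'text/plain\n'                          # content type
    'Tue, 27 Mar 2007 19:36:42 GMT\n'       # date header value
    '/my-bucket/'                           # CanonicalizedResource
)

hashed = hmac.new(key, string_to_sign.encode('utf-8'), hashlib.sha1)
sig = binascii.b2a_base64(hashed.digest()).strip()
authorization = 'AWS {0}:{1}'.format(keyid, sig.decode())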
Example #37
0
def query(key,
          keyid,
          method='GET',
          params=None,
          headers=None,
          requesturl=None,
          return_url=False,
          bucket=None,
          service_url=None,
          path='',
          return_bin=False,
          action=None,
          local_file=None,
          verify_ssl=True,
          full_headers=False,
          kms_keyid=None,
          location=None,
          role_arn=None,
          chunk_size=16384):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    SSL verification may also be turned off in the configuration:

    s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    '''
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if bucket:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    else:
        endpoint = service_url

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE

    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ('PUT', 'POST'):
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid

    if not location:
        location = __utils__['aws.get_location']()

    data = ''
    payload_hash = None
    if method == 'PUT':
        if local_file:
            payload_hash = salt.utils.get_hash(local_file, form='sha256')

    if path is None:
        path = ''

    if not requesturl:
        requesturl = 'https://{0}/{1}'.format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={
                'id': keyid,
                'key': key
            },
            role_arn=role_arn,
            location=location,
            product='s3',
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug('S3 Request: {0}'.format(requesturl))
    log.debug('S3 Headers::')
    log.debug('    Authorization: {0}'.format(headers['Authorization']))

    if not data:
        data = None

    response = None
    if method == 'PUT':
        if local_file:
            data = salt.utils.fopen(local_file, 'r')
        result = requests.request(method,
                                  requesturl,
                                  headers=headers,
                                  data=data,
                                  verify=verify_ssl,
                                  stream=True)
        response = result.content
    elif method == 'GET' and local_file and not return_bin:
        result = requests.request(method,
                                  requesturl,
                                  headers=headers,
                                  data=data,
                                  verify=verify_ssl,
                                  stream=True)
    else:
        result = requests.request(method,
                                  requesturl,
                                  headers=headers,
                                  data=data,
                                  verify=verify_ssl)
        response = result.content

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = response or result.content or 'Unknown error'
        log.debug('    Response content: {0}'.format(err_text))

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data['Code']
            err_msg = err_data['Message']
        except (KeyError, ET.ParseError) as err:
            log.debug('Failed to parse s3 err response. {0}: {1}'.format(
                type(err).__name__, err))
            err_code = 'http-{0}'.format(result.status_code)
            err_msg = err_text

    log.debug('S3 Response Status Code: {0}'.format(result.status_code))

    if method == 'PUT':
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    'Failed to upload from {0} to {1}. {2}: {3}'.format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to create bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if local_file:
            log.debug('Uploaded from {0} to {1}'.format(local_file, path))
        else:
            log.debug('Created bucket {0}'.format(bucket))
        return

    if method == 'DELETE':
        if not str(result.status_code).startswith('2'):
            if path:
                raise CommandExecutionError(
                    'Failed to delete {0} from bucket {1}. {2}: {3}'.format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to delete bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if path:
            log.debug('Deleted {0} from bucket {1}'.format(path, bucket))
        else:
            log.debug('Deleted bucket {0}'.format(bucket))
        return

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        if result.status_code < 200 or result.status_code >= 300:
            raise CommandExecutionError('Failed to get file. {0}: {1}'.format(
                err_code, err_msg))

        log.debug('Saving to local file: {0}'.format(local_file))
        with salt.utils.fopen(local_file, 'wb') as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return 'Saved to local file: {0}'.format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        raise CommandExecutionError('Failed s3 operation. {0}: {1}'.format(
            err_code, err_msg))

    # This can be used to return a binary object wholesale
    if return_bin:
        return response

    if response:
        items = ET.fromstring(response)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
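The streamed GET path above writes the body out in chunks; the same pattern using requests directly, with a placeholder URL and local path.

import requests

resp = requests.get('https://s3.amazonaws.com/my-bucket/my-file.bin',
                    stream=True)
with open('/tmp/my-file.bin', 'wb') as out:
    for chunk in resp.iter_content(chunk_size=16384):
        out.write(chunk)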
Example #38
0
 def test_boot_default_dev(self):
     diskp = virt._disk_profile('default', 'kvm')
     nicp = virt._nic_profile('default', 'kvm')
     xml_data = virt._gen_xml('hello', 1, 512, diskp, nicp, 'kvm')
     root = ET.fromstring(xml_data)
     self.assertEqual(root.find('os/boot').attrib['dev'], 'hd')
Example #39
0
def call(payload=None):
    '''
    This function captures the query string and sends it to the Palo Alto device.
    '''
    r = None
    try:
        if DETAILS['method'] == 'dev_key':
            # Pass the api key without the target declaration
            conditional_payload = {'key': DETAILS['apikey']}
            payload.update(conditional_payload)
            r = __utils__['http.query'](DETAILS['url'],
                                        data=payload,
                                        method='POST',
                                        decode_type='plain',
                                        decode=True,
                                        verify_ssl=DETAILS["verify_ssl"],
                                        raise_error=True)
        elif DETAILS['method'] == 'dev_pass':
            # Pass credentials without the target declaration
            r = __utils__['http.query'](DETAILS['url'],
                                        username=DETAILS['username'],
                                        password=DETAILS['password'],
                                        data=payload,
                                        method='POST',
                                        decode_type='plain',
                                        decode=True,
                                        verify_ssl=DETAILS["verify_ssl"],
                                        raise_error=True)
        elif DETAILS['method'] == 'pan_key':
            # Pass the api key with the target declaration
            conditional_payload = {
                'key': DETAILS['apikey'],
                'target': DETAILS['serial']
            }
            payload.update(conditional_payload)
            r = __utils__['http.query'](DETAILS['url'],
                                        data=payload,
                                        method='POST',
                                        decode_type='plain',
                                        decode=True,
                                        verify_ssl=DETAILS["verify_ssl"],
                                        raise_error=True)
        elif DETAILS['method'] == 'pan_pass':
            # Pass credentials with the target declaration
            conditional_payload = {'target': DETAILS['serial']}
            payload.update(conditional_payload)
            r = __utils__['http.query'](DETAILS['url'],
                                        username=DETAILS['username'],
                                        password=DETAILS['password'],
                                        data=payload,
                                        method='POST',
                                        decode_type='plain',
                                        decode=True,
                                        verify_ssl=DETAILS["verify_ssl"],
                                        raise_error=True)
    except KeyError as err:
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")

    if not r:
        raise salt.exceptions.CommandExecutionError(
            "Did not receive a valid response from host.")

    xmldata = ET.fromstring(r['text'])

    # If we are pulling the candidate configuration, we need to strip the dirtyId
    if payload['type'] == 'config' and payload['action'] == 'get':
        xmldata = (_strip_dirty(xmldata))

    return xml.to_dict(xmldata, True)
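A hypothetical payload for the function above, assuming the proxy's DETAILS were initialized by its init(); the type/action/xpath values follow the PAN-OS XML API conventions and are illustrative only.

payload = {
    'type': 'config',
    'action': 'get',
    'xpath': "/config/devices/entry[@name='localhost.localdomain']/vsys",
}
candidate = call(payload)   # dict built by xml.to_dict from the XML response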
Example #40
0
def show_employee(emp_id, fields=None):
    '''
    Show a single employee for this company, by employee ID.

    CLI Example:

        salt myminion bamboohr.show_employee 1138

    By default, the fields normally returned from bamboohr.list_employees are
    returned. These fields are:

        - canUploadPhoto
        - department
        - displayName
        - firstName
        - id
        - jobTitle
        - lastName
        - location
        - mobilePhone
        - nickname
        - photoUploaded
        - photoUrl
        - workEmail
        - workPhone
        - workPhoneExtension

    If needed, a different set of fields may be specified, separated by commas:

    CLI Example:

        salt myminion bamboohr.show_employee 1138 displayName,dateOfBirth

    A list of available fields can be found at
    http://www.bamboohr.com/api/documentation/employees.php
    '''
    ret = {}
    if fields is None:
        fields = ','.join((
            'canUploadPhoto',
            'department',
            'displayName',
            'firstName',
            'id',
            'jobTitle',
            'lastName',
            'location',
            'mobilePhone',
            'nickname',
            'photoUploaded',
            'photoUrl',
            'workEmail',
            'workPhone',
            'workPhoneExtension',
        ))

    status, result = _query(action='employees',
                            command=emp_id,
                            args={'fields': fields})

    root = ET.fromstring(result)
    items = root.getchildren()

    ret = {'id': emp_id}
    for item in items:
        ret[item.items()[0][1]] = item.text
    return ret
Example #41
0
def security_rule_exists(
    name,
    rulename=None,
    vsys="1",
    action=None,
    disabled=None,
    sourcezone=None,
    destinationzone=None,
    source=None,
    destination=None,
    application=None,
    service=None,
    description=None,
    logsetting=None,
    logstart=None,
    logend=None,
    negatesource=None,
    negatedestination=None,
    profilegroup=None,
    datafilter=None,
    fileblock=None,
    spyware=None,
    urlfilter=None,
    virus=None,
    vulnerability=None,
    wildfire=None,
    move=None,
    movetarget=None,
    commit=False,
):
    """
    Ensures that a security rule exists on the device. Also, ensure that all configurations are set appropriately.

    This method will create the rule if it does not exist. If the rule does exist, it will ensure that the
    configurations are set appropriately.

    If the rule does not exist and is created, any value that is not provided will be set to its system default.
    The action, to, from, source, destination, application, and service fields are mandatory and must be provided.

    This will enforce the exact match of the rule. For example, if the rule is currently configured with the log-end
    option, but this option is not specified in the state method, it will be removed and reset to the system default.

    It is strongly recommended to specify all options to ensure proper operation.

    When defining the profile group settings, the device can only support either a profile group or individual settings.
    If both are specified, the profile group will be preferred and the individual settings are ignored. If neither is
    specified, the value will be set to the system default of none.

    name: The name of the module function to execute.

    rulename(str): The name of the security rule.  The name is case-sensitive and can have up to 31 characters, which
    can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on Panorama,
    unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    action(str): The action that the security rule will enforce. Valid options are: allow, deny, drop, reset-client,
    reset-server, reset-both.

    disabled(bool): Controls if the rule is disabled. Set 'True' to disable and 'False' to enable.

    sourcezone(str, list): The source zone(s). The value 'any' will match all zones.

    destinationzone(str, list): The destination zone(s). The value 'any' will match all zones.

    source(str, list): The source address(es). The value 'any' will match all addresses.

    destination(str, list): The destination address(es). The value 'any' will match all addresses.

    application(str, list): The application(s) matched. The value 'any' will match all applications.

    service(str, list): The service(s) matched. The value 'any' will match all services. The value
    'application-default' will match based upon the application defined ports.

    description(str): A description for the policy (up to 255 characters).

    logsetting(str): The name of a valid log forwarding profile.

    logstart(bool): Generates a traffic log entry for the start of a session (disabled by default).

    logend(bool): Generates a traffic log entry for the end of a session (enabled by default).

    negatesource(bool): Match all but the specified source addresses.

    negatedestination(bool): Match all but the specified destination addresses.

    profilegroup(str): A valid profile group name.

    datafilter(str): A valid data filter profile name. Ignored with the profilegroup option set.

    fileblock(str): A valid file blocking profile name. Ignored with the profilegroup option set.

    spyware(str): A valid spyware profile name. Ignored with the profilegroup option set.

    urlfilter(str): A valid URL filtering profile name. Ignored with the profilegroup option set.

    virus(str): A valid virus profile name. Ignored with the profilegroup option set.

    vulnerability(str): A valid vulnerability profile name. Ignored with the profilegroup option set.

    wildfire(str): A valid WildFire analysis profile name. Ignored with the profilegroup option set.

    move(str): An optional argument that ensures the rule is moved to a specific location. Valid options are 'top',
    'bottom', 'before', or 'after'. The 'before' and 'after' options require the use of the 'movetarget' argument
    to define the location of the move request.

    movetarget(str): An optional argument that defines the target of the move operation if the move argument is
    set to 'before' or 'after'.

    commit(bool): If true the firewall will commit the changes, if false do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/rulebase/security/rule01:
            panos.security_rule_exists:
              - rulename: rule01
              - vsys: 1
              - action: allow
              - disabled: False
              - sourcezone: untrust
              - destinationzone: trust
              - source:
                - 10.10.10.0/24
                - 1.1.1.1
              - destination:
                - 2.2.2.2-2.2.2.4
              - application:
                - any
              - service:
                - tcp-25
              - description: My test security rule
              - logsetting: logprofile
              - logstart: False
              - logend: True
              - negatesource: False
              - negatedestination: False
              - profilegroup: myprofilegroup
              - move: top
              - commit: False

        panos/rulebase/security/rule02:
            panos.security_rule_exists:
              - rulename: rule02
              - vsys: 1
              - action: allow
              - disabled: False
              - sourcezone: untrust
              - destinationzone: trust
              - source:
                - 10.10.10.0/24
                - 1.1.1.1
              - destination:
                - 2.2.2.2-2.2.2.4
              - application:
                - any
              - service:
                - tcp-25
              - description: My test security rule
              - logsetting: logprofile
              - logstart: False
              - logend: False
              - datafilter: foobar
              - fileblock: foobar
              - spyware: foobar
              - urlfilter: foobar
              - virus: foobar
              - vulnerability: foobar
              - wildfire: foobar
              - move: after
              - movetarget: rule01
              - commit: False
    """
    ret = _default_ret(name)

    if not rulename:
        return ret

    # Check if rule currently exists
    rule = __salt__["panos.get_security_rule"](rulename, vsys)["result"]

    if rule and "entry" in rule:
        rule = rule["entry"]
    else:
        rule = {}

    # Build the rule element
    element = ""
    if sourcezone:
        element += "<from>{0}</from>".format(_build_members(sourcezone, True))
    else:
        ret.update({"comment": "The sourcezone field must be provided."})
        return ret

    if destinationzone:
        element += "<to>{0}</to>".format(_build_members(destinationzone, True))
    else:
        ret.update({"comment": "The destinationzone field must be provided."})
        return ret

    if source:
        element += "<source>{0}</source>".format(_build_members(source, True))
    else:
        ret.update({"comment": "The source field must be provided."})
        return ret

    if destination:
        element += "<destination>{0}</destination>".format(
            _build_members(destination, True))
    else:
        ret.update({"comment": "The destination field must be provided."})
        return ret

    if application:
        element += "<application>{0}</application>".format(
            _build_members(application, True))
    else:
        ret.update({"comment": "The application field must be provided."})
        return ret

    if service:
        element += "<service>{0}</service>".format(
            _build_members(service, True))
    else:
        ret.update({"comment": "The service field must be provided."})
        return ret

    if action:
        element += "<action>{0}</action>".format(action)
    else:
        ret.update({"comment": "The action field must be provided."})
        return ret

    if disabled is not None:
        if disabled:
            element += "<disabled>yes</disabled>"
        else:
            element += "<disabled>no</disabled>"

    if description:
        element += "<description>{0}</description>".format(description)

    if logsetting:
        element += "<log-setting>{0}</log-setting>".format(logsetting)

    if logstart is not None:
        if logstart:
            element += "<log-start>yes</log-start>"
        else:
            element += "<log-start>no</log-start>"

    if logend is not None:
        if logend:
            element += "<log-end>yes</log-end>"
        else:
            element += "<log-end>no</log-end>"

    if negatesource is not None:
        if negatesource:
            element += "<negate-source>yes</negate-source>"
        else:
            element += "<negate-source>no</negate-source>"

    if negatedestination is not None:
        if negatedestination:
            element += "<negate-destination>yes</negate-destination>"
        else:
            element += "<negate-destination>no</negate-destination>"

    # Build the profile settings
    profile_string = None
    if profilegroup:
        profile_string = "<group><member>{0}</member></group>".format(
            profilegroup)
    else:
        member_string = ""
        if datafilter:
            member_string += "<data-filtering><member>{0}</member></data-filtering>".format(
                datafilter)
        if fileblock:
            member_string += "<file-blocking><member>{0}</member></file-blocking>".format(
                fileblock)
        if spyware:
            member_string += "<spyware><member>{0}</member></spyware>".format(
                spyware)
        if urlfilter:
            member_string += "<url-filtering><member>{0}</member></url-filtering>".format(
                urlfilter)
        if virus:
            member_string += "<virus><member>{0}</member></virus>".format(
                virus)
        if vulnerability:
            member_string += "<vulnerability><member>{0}</member></vulnerability>".format(
                vulnerability)
        if wildfire:
            member_string += "<wildfire-analysis><member>{0}</member></wildfire-analysis>".format(
                wildfire)
        if member_string != "":
            profile_string = "<profiles>{0}</profiles>".format(member_string)

    if profile_string:
        element += "<profile-setting>{0}</profile-setting>".format(
            profile_string)

    full_element = "<entry name='{0}'>{1}</entry>".format(rulename, element)

    new_rule = xml.to_dict(ET.fromstring(full_element), True)

    config_change = False

    if rule == new_rule:
        ret.update(
            {"comment": "Security rule already exists. No changes required."})
    else:
        config_change = True
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/rulebase/"
            "security/rules/entry[@name='{1}']".format(vsys, rulename))

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({"comment": msg})
            return ret

        ret.update({
            "changes": {
                "before": rule,
                "after": new_rule
            },
            "comment": "Security rule verified successfully.",
        })

    if move:
        movepath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/rulebase/"
            "security/rules/entry[@name='{1}']".format(vsys, rulename))
        move_result = False
        move_msg = ""
        if move == "before" and movetarget:
            move_result, move_msg = _move_before(movepath, movetarget)
        elif move == "after":
            move_result, move_msg = _move_after(movepath, movetarget)
        elif move == "top":
            move_result, move_msg = _move_top(movepath)
        elif move == "bottom":
            move_result, move_msg = _move_bottom(movepath)

        if config_change:
            ret.update({
                "changes": {
                    "before": rule,
                    "after": new_rule,
                    "move": move_msg
                }
            })
        else:
            ret.update({"changes": {"move": move_msg}})

        if not move_result:
            ret.update({"comment": move_msg})
            return ret

    if commit is True:
        ret.update({"commit": __salt__["panos.commit"](), "result": True})
    else:
        ret.update({"result": True})

    return ret
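A minimal standard-library sketch of the pattern used by security_rule_exists above: render the desired rule to an <entry> element, parse it back, and push it only when it differs from the entry the firewall currently reports. The helper and rule names below are hypothetical stand-ins.

import xml.etree.ElementTree as ET


def build_members(values):
    # Rough stand-in for the _build_members(..., True) helper used above.
    if not isinstance(values, list):
        values = [values]
    return "".join("<member>{0}</member>".format(v) for v in values)


element = "<from>{0}</from><to>{1}</to><action>allow</action>".format(
    build_members("untrust"), build_members(["trust", "dmz"]))
full_element = "<entry name='rule01'>{0}</entry>".format(element)

entry = ET.fromstring(full_element)
print(entry.get("name"))                               # rule01
print([m.text for m in entry.findall("./to/member")])  # ['trust', 'dmz']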
示例#42
0
def test_gen_xml_isolated():
    """
    Test the virt._gen_net_xml() function for an isolated network
    """
    xml_data = virt._gen_net_xml("network", "main", None, None)
    assert ET.fromstring(xml_data).find("forward") is None
示例#43
0
def test_gen_clock():
    """
    Test the virt._gen_xml clock property
    """
    # Localtime with adjustment
    xml_data = virt._gen_xml(
        virt.libvirt.openAuth.return_value,
        "hello",
        1,
        512,
        {},
        {},
        "kvm",
        "hvm",
        "x86_64",
        clock={
            "adjustment": 3600,
            "utc": False
        },
    )
    root = ET.fromstring(xml_data)
    assert "localtime" == root.find("clock").get("offset")
    assert "3600" == root.find("clock").get("adjustment")

    # Specific timezone
    xml_data = virt._gen_xml(
        virt.libvirt.openAuth.return_value,
        "hello",
        1,
        512,
        {},
        {},
        "kvm",
        "hvm",
        "x86_64",
        clock={"timezone": "CEST"},
    )
    root = ET.fromstring(xml_data)
    assert "timezone" == root.find("clock").get("offset")
    assert "CEST" == root.find("clock").get("timezone")

    # UTC
    xml_data = virt._gen_xml(
        virt.libvirt.openAuth.return_value,
        "hello",
        1,
        512,
        {},
        {},
        "kvm",
        "hvm",
        "x86_64",
        clock={"utc": True},
    )
    root = ET.fromstring(xml_data)
    assert "utc" == root.find("clock").get("offset")

    # Timers
    xml_data = virt._gen_xml(
        virt.libvirt.openAuth.return_value,
        "hello",
        1,
        512,
        {},
        {},
        "kvm",
        "hvm",
        "x86_64",
        clock={
            "timers": {
                "tsc": {
                    "frequency": 3504000000,
                    "mode": "native"
                },
                "rtc": {
                    "tickpolicy": "catchup",
                    "slew": 4636,
                    "threshold": 123,
                    "limit": 2342,
                },
                "hpet": {
                    "present": False
                },
            },
        },
    )
    root = ET.fromstring(xml_data)
    assert "utc" == root.find("clock").get("offset")
    assert "3504000000" == root.find("clock/timer[@name='tsc']").get(
        "frequency")
    assert "native" == root.find("clock/timer[@name='tsc']").get("mode")
    assert "catchup" == root.find("clock/timer[@name='rtc']").get("tickpolicy")
    assert {
        "slew": "4636",
        "threshold": "123",
        "limit": "2342"
    } == root.find("clock/timer[@name='rtc']/catchup").attrib
    assert "no" == root.find("clock/timer[@name='hpet']").get("present")
示例#44
0
def test_gen_xml_nat():
    """
    Test virt._gen_net_xml() in a NAT setup
    """
    xml_data = virt._gen_net_xml(
        "network",
        "main",
        "nat",
        None,
        ip_configs=[
            {
                "cidr":
                "192.168.2.0/24",
                "dhcp_ranges": [
                    {
                        "start": "192.168.2.10",
                        "end": "192.168.2.25"
                    },
                    {
                        "start": "192.168.2.110",
                        "end": "192.168.2.125"
                    },
                ],
                "hosts": {
                    "192.168.2.10": {
                        "mac": "00:16:3e:77:e2:ed",
                        "name": "foo.example.com",
                    },
                },
                "bootp": {
                    "file": "pxeboot.img",
                    "server": "192.168.2.1"
                },
                "tftp":
                "/path/to/tftp",
            },
            {
                "cidr": "2001:db8:ca2:2::/64",
                "hosts": {
                    "2001:db8:ca2:2:3::1": {
                        "name": "paul"
                    },
                    "2001:db8:ca2:2:3::2": {
                        "id": "0:3:0:1:0:16:3e:11:22:33",
                        "name": "ralph",
                    },
                },
            },
        ],
        nat={
            "address": {
                "start": "1.2.3.4",
                "end": "1.2.3.10"
            },
            "port": {
                "start": 500,
                "end": 1000
            },
        },
        domain={
            "name": "acme.lab",
            "localOnly": True
        },
        mtu=9000,
    )
    root = ET.fromstring(xml_data)
    assert "network" == root.find("name").text
    assert "main" == root.find("bridge").attrib["name"]
    assert "nat" == root.find("forward").attrib["mode"]
    expected_ipv4 = ET.fromstring("""
        <ip family='ipv4' address='192.168.2.1' prefix='24'>
          <dhcp>
            <range start='192.168.2.10' end='192.168.2.25'/>
            <range start='192.168.2.110' end='192.168.2.125'/>
            <host ip='192.168.2.10' mac='00:16:3e:77:e2:ed' name='foo.example.com'/>
            <bootp file='pxeboot.img' server='192.168.2.1'/>
          </dhcp>
          <tftp root='/path/to/tftp'/>
        </ip>
        """)
    assert_xml_equals(expected_ipv4, root.find("./ip[@address='192.168.2.1']"))

    expected_ipv6 = ET.fromstring("""
        <ip family='ipv6' address='2001:db8:ca2:2::1' prefix='64'>
          <dhcp>
            <host ip='2001:db8:ca2:2:3::1' name='paul'/>
            <host ip='2001:db8:ca2:2:3::2' id='0:3:0:1:0:16:3e:11:22:33' name='ralph'/>
          </dhcp>
        </ip>
        """)
    assert_xml_equals(expected_ipv6,
                      root.find("./ip[@address='2001:db8:ca2:2::1']"))

    actual_nat = ET.tostring(xmlutil.strip_spaces(root.find("./forward/nat")))
    expected_nat = strip_xml("""
        <nat>
          <address start='1.2.3.4' end='1.2.3.10'/>
          <port start='500' end='1000'/>
        </nat>
        """)
    assert expected_nat == actual_nat

    assert {
        "name": "acme.lab",
        "localOnly": "yes"
    } == root.find("./domain").attrib
    assert "9000" == root.find("mtu").get("size")
示例#45
0
def test_update_xen_disk_volumes(make_mock_vm, make_mock_storage_pool):
    xml_def = """
        <domain type='xen'>
          <name>my_vm</name>
          <memory unit='KiB'>524288</memory>
          <currentMemory unit='KiB'>524288</currentMemory>
          <vcpu placement='static'>1</vcpu>
          <os>
            <type arch='x86_64'>linux</type>
            <kernel>/usr/lib/grub2/x86_64-xen/grub.xen</kernel>
          </os>
          <devices>
            <disk type='file' device='disk'>
              <driver name='qemu' type='qcow2' cache='none' io='native'/>
              <source file='/path/to/default/my_vm_system'/>
              <target dev='xvda' bus='xen'/>
            </disk>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none' io='native'/>
              <source dev='/path/to/my-iscsi/unit:0:0:1'/>
              <target dev='xvdb' bus='xen'/>
            </disk>
            <controller type='xenbus' index='0'/>
          </devices>
        </domain>"""
    domain_mock = make_mock_vm(xml_def)
    make_mock_storage_pool("default", "dir", ["my_vm_system"])
    make_mock_storage_pool("my-iscsi", "iscsi", ["unit:0:0:1"])
    make_mock_storage_pool("vdb", "disk", ["vdb1"])

    ret = virt.update(
        "my_vm",
        disks=[
            {
                "name": "system",
                "pool": "default"
            },
            {
                "name": "iscsi-data",
                "pool": "my-iscsi",
                "source_file": "unit:0:0:1"
            },
            {
                "name": "vdb-data",
                "pool": "vdb",
                "source_file": "vdb1"
            },
            {
                "name": "file-data",
                "pool": "default",
                "size": "10240"
            },
        ],
    )

    assert ret["definition"]
    define_mock = virt.libvirt.openAuth().defineXML
    setxml = ET.fromstring(define_mock.call_args[0][0])
    assert "block" == setxml.find(".//disk[3]").get("type")
    assert "/path/to/vdb/vdb1" == setxml.find(".//disk[3]/source").get("dev")

    # Note that my_vm-file-data was not an existing volume before the update
    assert "file" == setxml.find(".//disk[4]").get("type")
    assert "/path/to/default/my_vm_file-data" == setxml.find(
        ".//disk[4]/source").get("file")
示例#46
0
def query(
    key,
    keyid,
    method="GET",
    params=None,
    headers=None,
    requesturl=None,
    return_url=False,
    bucket=None,
    service_url=None,
    path="",
    return_bin=False,
    action=None,
    local_file=None,
    verify_ssl=True,
    full_headers=False,
    kms_keyid=None,
    location=None,
    role_arn=None,
    chunk_size=16384,
    path_style=False,
    https_enable=True,
):
    """
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use Salt with a proxy for an S3-compatible storage service.

    You can use either the https or the http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If a region is not specified, an attempt to fetch the region from the EC2
    IAM metadata service will be made. Failing that, the default is us-east-1.
    """
    if not HAS_REQUESTS:
        log.error("There was an error: requests is required for s3 access")

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = "s3.amazonaws.com"

    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = "{}.{}".format(bucket, service_url)

    if path_style and bucket:
        path = "{}/{}".format(bucket, path)

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE

    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ("PUT", "POST"):
        headers["x-amz-server-side-encryption"] = "aws:kms"
        headers["x-amz-server-side-encryption-aws-kms-key-id"] = kms_keyid

    if not location:
        location = salt.utils.aws.get_location()

    data = ""
    fh = None
    payload_hash = None
    if method == "PUT":
        if local_file:
            payload_hash = salt.utils.hashutils.get_hash(local_file, form="sha256")

    if path is None:
        path = ""
    path = urllib.parse.quote(path)

    if not requesturl:
        requesturl = (("https" if https_enable else "http") + "://{0}/{1}").format(
            endpoint, path
        )
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri="/{}".format(path),
            prov_dict={"id": keyid, "key": key},
            role_arn=role_arn,
            location=location,
            product="s3",
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug("S3 Request: %s", requesturl)
    log.debug("S3 Headers::")
    log.debug("    Authorization: %s", headers["Authorization"])

    if not data:
        data = None

    try:
        if method == "PUT":
            if local_file:
                # pylint: disable=resource-leakage
                fh = salt.utils.files.fopen(local_file, "rb")
                # pylint: enable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                stream=True,
                timeout=300,
            )
        elif method == "GET" and local_file and not return_bin:
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                stream=True,
                timeout=300,
            )
        else:
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                timeout=300,
            )
    finally:
        if fh is not None:
            fh.close()

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or "Unknown error"
        log.debug("    Response content: %s", err_text)

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data["Code"]
            err_msg = err_data["Message"]
        except (KeyError, ET.ParseError) as err:
            log.debug(
                "Failed to parse s3 err response. %s: %s", type(err).__name__, err
            )
            err_code = "http-{}".format(result.status_code)
            err_msg = err_text

    log.debug("S3 Response Status Code: %s", result.status_code)

    if method == "PUT":
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    "Failed to upload from {} to {}. {}: {}".format(
                        local_file, path, err_code, err_msg
                    )
                )
            raise CommandExecutionError(
                "Failed to create bucket {}. {}: {}".format(bucket, err_code, err_msg)
            )

        if local_file:
            log.debug("Uploaded from %s to %s", local_file, path)
        else:
            log.debug("Created bucket %s", bucket)
        return

    if method == "DELETE":
        if not str(result.status_code).startswith("2"):
            if path:
                raise CommandExecutionError(
                    "Failed to delete {} from bucket {}. {}: {}".format(
                        path, bucket, err_code, err_msg
                    )
                )
            raise CommandExecutionError(
                "Failed to delete bucket {}. {}: {}".format(bucket, err_code, err_msg)
            )

        if path:
            log.debug("Deleted %s from bucket %s", path, bucket)
        else:
            log.debug("Deleted bucket %s", bucket)
        return

    # This can be used to save a binary object to disk
    if local_file and method == "GET":
        if result.status_code < 200 or result.status_code >= 300:
            raise CommandExecutionError(
                "Failed to get file. {}: {}".format(err_code, err_msg)
            )

        log.debug("Saving to local file: %s", local_file)
        with salt.utils.files.fopen(local_file, "wb") as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return "Saved to local file: {}".format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        raise CommandExecutionError(
            "Failed s3 operation. {}: {}".format(err_code, err_msg)
        )

    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content

    if result.content:
        items = ET.fromstring(result.content)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {"headers": []}
        if full_headers:
            ret["headers"] = dict(result.headers)
        else:
            for header in result.headers:
                ret["headers"].append(header.strip())

    return ret
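An illustrative call pattern for the query() function above, assuming it is importable as salt.utils.s3.query; the bucket name is hypothetical and the credentials are the placeholder values from the docstring. A GET against a bucket (without local_file or return_bin) returns the parsed listing as a list of dicts.

import salt.utils.s3

objects = salt.utils.s3.query(
    key="askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs",
    keyid="GKTADJGHEIQSXMKKRBJ08H",
    method="GET",
    bucket="my-example-bucket",  # hypothetical bucket name
    verify_ssl=True,
)

for obj in objects or []:
    # Each dict mirrors one element of the S3 listing XML (Key, Size, ETag, ...).
    print(obj.get("Key"), obj.get("Size"))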
示例#47
0
def query(url,
          method='GET',
          params=None,
          data=None,
          data_file=None,
          header_dict=None,
          header_list=None,
          header_file=None,
          username=None,
          password=None,
          auth=None,
          decode=False,
          decode_type='auto',
          status=False,
          headers=False,
          text=False,
          cookies=None,
          cookie_jar=None,
          cookie_format='lwp',
          persist_session=False,
          session_cookie_jar=None,
          data_render=False,
          data_renderer=None,
          header_render=False,
          header_renderer=None,
          template_dict=None,
          test=False,
          test_url=None,
          node='minion',
          port=80,
          opts=None,
          backend='tornado',
          requests_lib=None,
          ca_bundle=None,
          verify_ssl=None,
          cert=None,
          text_out=None,
          headers_out=None,
          decode_out=None,
          stream=False,
          streaming_callback=None,
          handle=False,
          agent=USERAGENT,
          hide_fields=None,
          **kwargs):
    '''
    Query a resource, and decode the return data
    '''
    ret = {}

    if opts is None:
        if node == 'master':
            opts = salt.config.master_config(
                os.path.join(syspaths.CONFIG_DIR, 'master'))
        elif node == 'minion':
            opts = salt.config.minion_config(
                os.path.join(syspaths.CONFIG_DIR, 'minion'))
        else:
            opts = {}

    if requests_lib is None:
        requests_lib = opts.get('requests_lib', False)

    if requests_lib is True:
        log.warn('Please set "backend" to "requests" instead of setting '
                 '"requests_lib" to "True"')

        if HAS_REQUESTS is False:
            ret['error'] = ('http.query has been set to use requests, but the '
                            'requests library does not seem to be installed')
            log.error(ret['error'])
            return ret

        backend = 'requests'

    else:
        requests_log = logging.getLogger('requests')
        requests_log.setLevel(logging.WARNING)

    # Some libraries don't support separation of url and GET parameters
    # Don't need a try/except block, since Salt depends on tornado
    url_full = tornado.httputil.url_concat(url, params)

    if ca_bundle is None:
        ca_bundle = get_ca_bundle(opts)

    if verify_ssl is None:
        verify_ssl = opts.get('verify_ssl', True)

    if cert is None:
        cert = opts.get('cert', None)

    if data_file is not None:
        data = _render(data_file, data_render, data_renderer, template_dict,
                       opts)

    # Make sure no secret fields show up in logs
    log_url = sanitize_url(url_full, hide_fields)

    log.debug('Requesting URL {0} using {1} method'.format(log_url, method))
    if method == 'POST':
        # Make sure no secret fields show up in logs
        if isinstance(data, dict):
            log_data = data.copy()
            if hide_fields:
                for item in data:
                    for field in hide_fields:
                        if item == field:
                            log_data[item] = 'XXXXXXXXXX'
            log.trace('Request POST Data: {0}'.format(
                pprint.pformat(log_data)))
        else:
            log.trace('Request POST Data: {0}'.format(pprint.pformat(data)))

    if header_file is not None:
        header_tpl = _render(header_file, header_render, header_renderer,
                             template_dict, opts)
        if isinstance(header_tpl, dict):
            header_dict = header_tpl
        else:
            header_list = header_tpl.splitlines()

    if header_dict is None:
        header_dict = {}

    if header_list is None:
        header_list = []

    if cookie_jar is None:
        cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR),
                                  'cookies.txt')
    if session_cookie_jar is None:
        session_cookie_jar = os.path.join(
            opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.session.p')

    if persist_session is True and HAS_MSGPACK:
        # TODO: This is hackish; it will overwrite the session cookie jar with
        # all cookies from this one connection, rather than behaving like a
        # proper cookie jar. Unfortunately, since session cookies do not
        # contain expirations, they can't be stored in a proper cookie jar.
        if os.path.isfile(session_cookie_jar):
            with salt.utils.fopen(session_cookie_jar, 'rb') as fh_:
                session_cookies = msgpack.load(fh_)
            if isinstance(session_cookies, dict):
                header_dict.update(session_cookies)
        else:
            with salt.utils.fopen(session_cookie_jar, 'wb') as fh_:
                msgpack.dump('', fh_)

    for header in header_list:
        comps = header.split(':')
        if len(comps) < 2:
            continue
        header_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()

    if not auth:
        if username and password:
            auth = (username, password)
        else:
            auth = None

    if agent == USERAGENT:
        agent = '{0} http.query()'.format(agent)
    header_dict['User-agent'] = agent

    if backend == 'requests':
        sess = requests.Session()
        sess.auth = auth
        sess.headers.update(header_dict)
        log.trace('Request Headers: {0}'.format(sess.headers))
        sess_cookies = sess.cookies
        sess.verify = verify_ssl
    elif backend == 'urllib2':
        sess_cookies = None
    else:
        # Tornado
        sess_cookies = None

    if cookies is not None:
        if cookie_format == 'mozilla':
            sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(
                cookie_jar)
        else:
            sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(
                cookie_jar)
        if not os.path.isfile(cookie_jar):
            sess_cookies.save()
        sess_cookies.load()

    if test is True:
        if test_url is None:
            return {}
        else:
            url = test_url
            ret['test'] = True

    if backend == 'requests':
        req_kwargs = {}
        if stream is True:
            if requests.__version__[0] == '0':
                # 'stream' was called 'prefetch' before 1.0, with flipped meaning
                req_kwargs['prefetch'] = False
            else:
                req_kwargs['stream'] = True

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['cert'] = cert
            elif isinstance(cert, tuple):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['cert'] = cert
            else:
                log.error(
                    'The client-side certificate path that was passed is '
                    'not valid: {0}'.format(cert))

        result = sess.request(method,
                              url,
                              params=params,
                              data=data,
                              **req_kwargs)
        result.raise_for_status()
        if stream is True or handle is True:
            return {'handle': result}

        log.debug('Final URL location of Response: {0}'.format(
            sanitize_url(result.url, hide_fields)))

        result_status_code = result.status_code
        result_headers = result.headers
        result_text = result.text
        result_cookies = result.cookies
    elif backend == 'urllib2':
        request = urllib_request.Request(url_full, data)
        handlers = [
            urllib_request.HTTPHandler,
            urllib_request.HTTPCookieProcessor(sess_cookies)
        ]

        if url.startswith('https') or port == 443:
            hostname = request.get_host()
            handlers[0] = urllib_request.HTTPSHandler(1)
            if not HAS_MATCHHOSTNAME:
                log.warn(
                    ('match_hostname() not available, SSL hostname checking '
                     'not available. THIS CONNECTION MAY NOT BE SECURE!'))
            elif verify_ssl is False:
                log.warn(('SSL certificate verification has been explicitly '
                          'disabled. THIS CONNECTION MAY NOT BE SECURE!'))
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((hostname, 443))
                sockwrap = ssl.wrap_socket(sock,
                                           ca_certs=ca_bundle,
                                           cert_reqs=ssl.CERT_REQUIRED)
                try:
                    match_hostname(sockwrap.getpeercert(), hostname)
                except CertificateError as exc:
                    ret['error'] = ('The certificate was invalid. '
                                    'Error returned was: {0}'.format(
                                        pprint.pformat(exc)))
                    return ret

                # Client-side cert handling
                if cert is not None:
                    cert_chain = None
                    if isinstance(cert, six.string_types):
                        if os.path.exists(cert):
                            cert_chain = (cert,)
                    elif isinstance(cert, tuple):
                        if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                            cert_chain = cert
                    else:
                        log.error('The client-side certificate path that was '
                                  'passed is not valid: {0}'.format(cert))
                        return
                    if hasattr(ssl, 'SSLContext'):
                        # Python >= 2.7.9
                        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                        context.load_cert_chain(*cert_chain)
                        handlers.append(
                            urllib_request.HTTPSHandler(context=context))  # pylint: disable=E1123
                    else:
                        # Python < 2.7.9
                        cert_kwargs = {
                            'host': request.get_host(),
                            'port': port,
                            'cert_file': cert_chain[0]
                        }
                        if len(cert_chain) > 1:
                            cert_kwargs['key_file'] = cert_chain[1]
                        handlers[
                            0] = salt.ext.six.moves.http_client.HTTPSConnection(
                                **cert_kwargs)

        opener = urllib_request.build_opener(*handlers)
        for header in header_dict:
            request.add_header(header, header_dict[header])
        request.get_method = lambda: method
        try:
            result = opener.open(request)
        except URLError as exc:
            return {'Error': str(exc)}
        if stream is True or handle is True:
            return {'handle': result}

        result_status_code = result.code
        result_headers = result.headers.headers
        result_text = result.read()
    else:
        # Tornado
        req_kwargs = {}

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['client_cert'] = cert
            elif isinstance(cert, tuple):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['client_cert'] = cert[0]
                    req_kwargs['client_key'] = cert[1]
            else:
                log.error(
                    'The client-side certificate path that was passed is '
                    'not valid: {0}'.format(cert))

        if isinstance(data, dict):
            data = urllib.urlencode(data)

        max_body = opts.get('http_max_body',
                            salt.config.DEFAULT_MINION_OPTS['http_max_body'])
        timeout = opts.get(
            'http_request_timeout',
            salt.config.DEFAULT_MINION_OPTS['http_request_timeout'])

        try:
            result = HTTPClient(max_body_size=max_body).fetch(
                url_full,
                method=method,
                headers=header_dict,
                auth_username=username,
                auth_password=password,
                body=data,
                validate_cert=verify_ssl,
                allow_nonstandard_methods=True,
                streaming_callback=streaming_callback,
                request_timeout=timeout,
                **req_kwargs)
        except tornado.httpclient.HTTPError as exc:
            ret['status'] = exc.code
            ret['error'] = str(exc)
            return ret

        if stream is True or handle is True:
            return {'handle': result}

        result_status_code = result.code
        result_headers = result.headers
        result_text = result.body
        if 'Set-Cookie' in result_headers.keys() and cookies is not None:
            result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
            for item in result_cookies:
                sess_cookies.set_cookie(item)
        else:
            result_cookies = None

    if isinstance(result_headers, list):
        result_headers_dict = {}
        for header in result_headers:
            comps = header.split(':')
            result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
        result_headers = result_headers_dict

    log.debug('Response Status Code: {0}'.format(result_status_code))
    log.trace('Response Headers: {0}'.format(result_headers))
    log.trace('Response Cookies: {0}'.format(sess_cookies))
    try:
        log.trace('Response Text: {0}'.format(result_text))
    except UnicodeEncodeError as exc:
        log.trace(
            ('Cannot Trace Log Response Text: {0}. This may be due to '
             'incompatibilities between requests and logging.').format(exc))

    if text_out is not None and os.path.exists(text_out):
        with salt.utils.fopen(text_out, 'w') as tof:
            tof.write(result_text)

    if headers_out is not None and os.path.exists(headers_out):
        with salt.utils.fopen(headers_out, 'w') as hof:
            hof.write(result_headers)

    if cookies is not None:
        sess_cookies.save()

    if persist_session is True and HAS_MSGPACK:
        # TODO: See persist_session above
        if 'set-cookie' in result_headers:
            with salt.utils.fopen(session_cookie_jar, 'wb') as fh_:
                session_cookies = result_headers.get('set-cookie', None)
                if session_cookies is not None:
                    msgpack.dump({'Cookie': session_cookies}, fh_)
                else:
                    msgpack.dump('', fh_)

    if status is True:
        ret['status'] = result_status_code

    if headers is True:
        ret['headers'] = result_headers

    if decode is True:
        if decode_type == 'auto':
            content_type = result_headers.get('content-type',
                                              'application/json')
            if 'xml' in content_type:
                decode_type = 'xml'
            elif 'json' in content_type:
                decode_type = 'json'
            else:
                decode_type = 'plain'

        valid_decodes = ('json', 'xml', 'plain')
        if decode_type not in valid_decodes:
            ret['error'] = ('Invalid decode_type specified. '
                            'Valid decode types are: {0}'.format(
                                pprint.pformat(valid_decodes)))
            log.error(ret['error'])
            return ret

        if decode_type == 'json':
            ret['dict'] = json.loads(salt.utils.to_str(result_text))
        elif decode_type == 'xml':
            ret['dict'] = []
            items = ET.fromstring(result_text)
            for item in items:
                ret['dict'].append(xml.to_dict(item))
        else:
            text = True

        if decode_out and os.path.exists(decode_out):
            with salt.utils.fopen(decode_out, 'w') as dof:
                dof.write(result_text)

    if text is True:
        ret['text'] = result_text

    return ret
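A hedged usage sketch for the query() helper above, assumed to be importable as salt.utils.http; the target URL is only an example. With decode=True the response body is parsed according to its Content-Type and returned under ret['dict'].

import salt.utils.http

ret = salt.utils.http.query(
    'https://api.github.com/repos/saltstack/salt',  # example endpoint only
    method='GET',
    backend='requests',   # or 'tornado' / 'urllib2', as handled above
    decode=True,
    decode_type='auto',   # JSON and XML responses land in ret['dict']
    status=True,          # include the HTTP status code in the return
)

print(ret.get('status'))
print(type(ret.get('dict')))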
示例#48
0
 def test_pool(self):
     xml_data = virt._gen_pool_xml('pool', 'logical', 'base')
     root = ET.fromstring(xml_data)
     self.assertEqual(root.find('name').text, 'pool')
     self.assertEqual(root.attrib['type'], 'logical')
     self.assertEqual(root.find('target/path').text, '/dev/base')
示例#49
0
def query(action=None, command=None, args=None, method='GET', data=None):
    '''
    Make a web call to a Parallels provider
    '''
    path = config.get_cloud_config_value('url',
                                         get_configured_provider(),
                                         __opts__,
                                         search_global=False)
    auth_handler = urllib2.HTTPBasicAuthHandler()
    auth_handler.add_password(
        realm='Parallels Instance Manager',
        uri=path,
        user=config.get_cloud_config_value('user',
                                           get_configured_provider(),
                                           __opts__,
                                           search_global=False),
        passwd=config.get_cloud_config_value('password',
                                             get_configured_provider(),
                                             __opts__,
                                             search_global=False))
    opener = urllib2.build_opener(auth_handler)
    urllib2.install_opener(opener)

    if action:
        path += action

    if command:
        path += '/{0}'.format(command)

    if type(args) is not dict:
        args = {}

    kwargs = {'data': data}
    if type(data) is str and '<?xml' in data:
        kwargs['headers'] = {
            'Content-type': 'application/xml',
        }

    if args:
        path += '?%s'
        params = urllib.urlencode(args)
        req = urllib2.Request(url=path % params, **kwargs)
    else:
        req = urllib2.Request(url=path, **kwargs)

    req.get_method = lambda: method

    log.debug('{0} {1}'.format(method, req.get_full_url()))
    if data:
        log.debug(data)

    try:
        result = urllib2.urlopen(req)
        log.debug('PARALLELS Response Status Code: {0}'.format(
            result.getcode()))

        if 'content-length' in result.headers:
            content = result.read()
            result.close()
            items = ET.fromstring(content)
            return items

        return {}
    except urllib2.URLError as exc:
        log.error('PARALLELS Response Status Code: {0} {1}'.format(
            exc.code, exc.msg))
        root = ET.fromstring(exc.read())
        log.error(root)
        return {'error': root}
示例#50
0
def address_exists(
    name,
    addressname=None,
    vsys=1,
    ipnetmask=None,
    iprange=None,
    fqdn=None,
    description=None,
    commit=False,
):
    """
    Ensures that an address object exists in the configured state. If it does not exist or is not configured with the
    specified attributes, it will be adjusted to match the specified values.

    This module will only process a single address type (ip-netmask, ip-range, or fqdn). It will process the specified
    value in the following order: ip-netmask, ip-range, fqdn. For proper execution, only specify a single address
    type.

    name: The name of the module function to execute.

    addressname(str): The name of the address object. The name is case-sensitive and can have up to 31 characters,
    which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    ipnetmask(str): The IPv4 or IPv6 address or IP address range using the format ip_address/mask or ip_address where
    the mask is the number of significant binary digits used for the network portion of the address. Ideally, for IPv6,
    you specify only the network portion, not the host portion.

    iprange(str): A range of addresses using the format ip_address-ip_address where both addresses can be IPv4 or both
    can be IPv6.

    fqdn(str): A fully qualified domain name format. The FQDN initially resolves at commit time. Entries are
    subsequently refreshed when the firewall performs a check every 30 minutes; all changes in the IP address for the
    entries are picked up at the refresh cycle.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If True, the firewall will commit the changes; if False, do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/address/h-10.10.10.10:
            panos.address_exists:
              - addressname: h-10.10.10.10
              - vsys: 1
              - ipnetmask: 10.10.10.10
              - commit: False

        panos/address/10.0.0.1-10.0.0.50:
            panos.address_exists:
              - addressname: r-10.0.0.1-10.0.0.50
              - vsys: 1
              - iprange: 10.0.0.1-10.0.0.50
              - commit: False

        panos/address/foo.bar.com:
            panos.address_exists:
              - addressname: foo.bar.com
              - vsys: 1
              - fqdn: foo.bar.com
              - description: My fqdn object
              - commit: False

    """
    ret = _default_ret(name)

    if not addressname:
        ret.update({"comment": "The service name field must be provided."})
        return ret

    # Check if address object currently exists
    address = __salt__["panos.get_address"](addressname, vsys)["result"]

    if address and "entry" in address:
        address = address["entry"]
    else:
        address = {}

    element = ""

    # Verify the arguments
    if ipnetmask:
        element = "<ip-netmask>{0}</ip-netmask>".format(ipnetmask)
    elif iprange:
        element = "<ip-range>{0}</ip-range>".format(iprange)
    elif fqdn:
        element = "<fqdn>{0}</fqdn>".format(fqdn)
    else:
        ret.update({"comment": "A valid address type must be specified."})
        return ret

    if description:
        element += "<description>{0}</description>".format(description)

    full_element = "<entry name='{0}'>{1}</entry>".format(addressname, element)

    new_address = xml.to_dict(ET.fromstring(full_element), True)

    if address == new_address:
        ret.update({
            "comment": "Address object already exists. No changes required.",
            "result": True,
        })
        return ret
    else:
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/address/"
            "entry[@name='{1}']".format(vsys, addressname))

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({"comment": msg})
            return ret

    if commit is True:
        ret.update({
            "changes": {
                "before": address,
                "after": new_address
            },
            "commit": __salt__["panos.commit"](),
            "comment": "Address object successfully configured.",
            "result": True,
        })
    else:
        ret.update({
            "changes": {
                "before": address,
                "after": new_address
            },
            "comment": "Service object successfully configured.",
            "result": True,
        })

    return ret
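For reference, a small standard-library sketch of the <entry> element address_exists builds for the fqdn example in its docstring; in the state itself this element is normalised with xml.to_dict() and compared against the entry already on the device.

import xml.etree.ElementTree as ET

element = "<fqdn>foo.bar.com</fqdn>"
element += "<description>My fqdn object</description>"
full_element = "<entry name='foo.bar.com'>{0}</entry>".format(element)

entry = ET.fromstring(full_element)
print(entry.get("name"))               # foo.bar.com
print(entry.find("fqdn").text)         # foo.bar.com
print(entry.find("description").text)  # My fqdn object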
示例#51
0
def query(params=None,
          setname=None,
          requesturl=None,
          location=None,
          return_url=False,
          return_root=False,
          opts=None,
          provider=None,
          endpoint=None,
          product='ec2',
          sigver='2'):
    '''
    Perform a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html

    Regions and endpoints are documented at:

    http://docs.aws.amazon.com/general/latest/gr/rande.html

    Default ``product`` is ``ec2``. Valid ``product`` names are:

    .. code-block:: yaml

        - autoscaling (Auto Scaling)
        - cloudformation (CloudFormation)
        - ec2 (Elastic Compute Cloud)
        - elasticache (ElastiCache)
        - elasticbeanstalk (Elastic BeanStalk)
        - elasticloadbalancing (Elastic Load Balancing)
        - elasticmapreduce (Elastic MapReduce)
        - iam (Identity and Access Management)
        - importexport (Import/Export)
        - monitoring (CloudWatch)
        - rds (Relational Database Service)
        - simpledb (SimpleDB)
        - sns (Simple Notification Service)
        - sqs (Simple Queue Service)
    '''
    if params is None:
        params = {}

    if opts is None:
        opts = {}

    function = opts.get('function', (None, product))
    providers = opts.get('providers', {})

    if provider is None:
        prov_dict = providers.get(function[1], {}).get(product, {})
        if prov_dict:
            driver = list(list(prov_dict.keys()))[0]
            provider = providers.get(driver, product)
    else:
        prov_dict = providers.get(provider, {}).get(product, {})

    service_url = prov_dict.get('service_url', 'amazonaws.com')

    if not location:
        location = get_location(opts, provider)

    if endpoint is None:
        if not requesturl:
            endpoint = prov_dict.get(
                'endpoint', '{0}.{1}.{2}'.format(product, location,
                                                 service_url))

            requesturl = 'https://{0}/'.format(endpoint)
        else:
            endpoint = urlparse(requesturl).netloc
            if endpoint == '':
                endpoint_err = (
                    'Could not find a valid endpoint in the '
                    'requesturl: {0}. Looking for something '
                    'like https://some.aws.endpoint/?args').format(requesturl)
                LOG.error(endpoint_err)
                if return_url is True:
                    return {'error': endpoint_err}, requesturl
                return {'error': endpoint_err}

    LOG.debug('Using AWS endpoint: {0}'.format(endpoint))
    method = 'GET'

    aws_api_version = prov_dict.get(
        'aws_api_version',
        prov_dict.get('{0}_api_version'.format(product),
                      DEFAULT_AWS_API_VERSION))

    # Fallback to ec2's id & key if none is found, for this component
    if not prov_dict.get('id', None):
        prov_dict['id'] = providers.get(provider, {}).get('ec2',
                                                          {}).get('id', {})
        prov_dict['key'] = providers.get(provider, {}).get('ec2',
                                                           {}).get('key', {})

    if sigver == '4':
        headers, requesturl = sig4(method,
                                   endpoint,
                                   params,
                                   prov_dict,
                                   aws_api_version,
                                   location,
                                   product,
                                   requesturl=requesturl)
        params_with_headers = {}
    else:
        params_with_headers = sig2(method, endpoint, params, prov_dict,
                                   aws_api_version)
        headers = {}

    attempts = 5
    while attempts > 0:
        LOG.debug('AWS Request: {0}'.format(requesturl))
        LOG.trace('AWS Request Parameters: {0}'.format(params_with_headers))
        try:
            result = requests.get(requesturl,
                                  headers=headers,
                                  params=params_with_headers)
            LOG.debug('AWS Response Status Code: {0}'.format(
                result.status_code))
            LOG.trace('AWS Response Text: {0}'.format(result.text))
            result.raise_for_status()
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = xml.to_dict(root)

            # check to see if we should retry the query
            err_code = data.get('Errors', {}).get('Error', {}).get('Code', '')
            if attempts > 0 and err_code and err_code in AWS_RETRY_CODES:
                attempts -= 1
                LOG.error('AWS Response Status Code and Error: [{0} {1}] {2}; '
                          'Attempts remaining: {3}'.format(
                              exc.response.status_code, exc, data, attempts))
                # Wait a bit before continuing to prevent throttling
                time.sleep(2)
                continue

            LOG.error(
                'AWS Response Status Code and Error: [{0} {1}] {2}'.format(
                    exc.response.status_code, exc, data))
            if return_url is True:
                return {'error': data}, requesturl
            return {'error': data}
    else:
        LOG.error('AWS Response Status Code and Error: [{0} {1}] {2}'.format(
            exc.response.status_code, exc, data))
        if return_url is True:
            return {'error': data}, requesturl
        return {'error': data}

    response = result.text

    root = ET.fromstring(response)
    items = root[1]
    if return_root is True:
        items = root

    if setname:
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)

        for item in range(0, children_len):
            comps = root[item].tag.split('}')
            if comps[1] == setname:
                items = root[item]

    ret = []
    for item in items:
        ret.append(xml.to_dict(item))

    if return_url is True:
        return ret, requesturl

    return ret
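A hedged sketch of driving the Signature Version 2 query() above to describe EC2 instances. The provider name is hypothetical, and opts is assumed to be an already loaded Salt cloud configuration dict supplying the id/key credentials.

# 'opts' and the 'my-ec2-config' provider are assumptions for illustration only.
params = {'Action': 'DescribeInstances'}

instances = query(
    params=params,
    setname='instancesSet',   # select the <instancesSet> child of the response
    location='us-east-1',
    opts=opts,
    provider='my-ec2-config',
    product='ec2',
    sigver='2',
)

for instance in instances:
    print(instance)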
示例#52
0
def address_group_exists(name,
                         groupname=None,
                         vsys=1,
                         members=None,
                         description=None,
                         commit=False):
    """
    Ensures that an address group object exists in the configured state. If it does not exist or is not configured with
    the specified attributes, it will be adjusted to match the specified values.

    This module will enforce group membership. If a group exists and contains members this state does not include,
    those members will be removed and replaced with the specified members in the state.

    name: The name of the module function to execute.

    groupname(str): The name of the address group object. The name is case-sensitive and can have up to 31 characters,
    which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    members(str, list): The members of the address group. These must be valid address objects or address groups on the
    system that already exist prior to the execution of this state.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If True, the firewall will commit the changes; if False, do not commit changes.

    SLS Example:

    .. code-block:: yaml

        panos/address-group/my-group:
            panos.address_group_exists:
              - groupname: my-group
              - vsys: 1
              - members:
                - my-address-object
                - my-other-address-group
              - description: A group that needs to exist
              - commit: False

    """
    ret = _default_ret(name)

    if not groupname:
        ret.update({"comment": "The group name field must be provided."})
        return ret

    # Check if address group object currently exists
    group = __salt__["panos.get_address_group"](groupname, vsys)["result"]

    if group and "entry" in group:
        group = group["entry"]
    else:
        group = {}

    # Verify the arguments
    if members:
        element = "<static>{0}</static>".format(_build_members(members, True))
    else:
        ret.update({"comment": "The group members must be provided."})
        return ret

    if description:
        element += "<description>{0}</description>".format(description)

    full_element = "<entry name='{0}'>{1}</entry>".format(groupname, element)

    new_group = xml.to_dict(ET.fromstring(full_element), True)

    if group == new_group:
        ret.update({
            "comment":
            "Address group object already exists. No changes required.",
            "result": True,
        })
        return ret
    else:
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/address-group/"
            "entry[@name='{1}']".format(vsys, groupname))

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({"comment": msg})
            return ret

    if commit is True:
        ret.update({
            "changes": {
                "before": group,
                "after": new_group
            },
            "commit": __salt__["panos.commit"](),
            "comment": "Address group object successfully configured.",
            "result": True,
        })
    else:
        ret.update({
            "changes": {
                "before": group,
                "after": new_group
            },
            "comment": "Address group object successfully configured.",
            "result": True,
        })

    return ret
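
The state above renders the desired configuration as an XML string, parses it with ET.fromstring(), and compares the converted dict against what the firewall currently reports. A rough round-trip sketch follows; _build_members() is not included in this excerpt, so the helper below is a hypothetical minimal stand-in:

import xml.etree.ElementTree as ET

def _build_members(members, anycheck=False):
    # Hypothetical stand-in: the real helper lives in the panos state module.
    # Here it simply wraps each member name in a <member> tag.
    if isinstance(members, str):
        members = [members]
    return "".join("<member>{0}</member>".format(m) for m in members)

members = ["my-address-object", "my-other-address-group"]
element = "<static>{0}</static>".format(_build_members(members, True))
element += "<description>{0}</description>".format("A group that needs to exist")
full_element = "<entry name='{0}'>{1}</entry>".format("my-group", element)

entry = ET.fromstring(full_element)
print(entry.get("name"))                       # my-group
print([m.text for m in entry.find("static")])  # ['my-address-object', 'my-other-address-group']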
Example #53
0
    def test_xml_case_f_legacy(self):
        xmldata = ET.fromstring(self.cases['f']['xml'])
        defaultdict = xml.to_dict(xmldata, False)
        self.assertEqual(defaultdict, self.cases['f']['legacy'])
Example #54
0
def edit_config(name, xpath=None, value=None, commit=False):
    """
    Edits a Palo Alto XPATH to a specific value. This will always overwrite the existing value, even if it is not
    changed.

    You can replace an existing object hierarchy at a specified location in the configuration with a new value. Use
    the xpath parameter to specify the location of the object, including the node to be replaced.

    This is the recommended state to enforce configurations on an xpath.

    name: The name of the module function to execute.

    xpath(str): The XPATH of the configuration API tree to control.

    value(str): The XML value to edit. This must be a child of the XPATH.

    commit(bool): If True, the firewall will commit the changes; if False, the changes will not be committed.

    SLS Example:

    .. code-block:: yaml

        panos/addressgroup:
            panos.edit_config:
              - xpath: /config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test']
              - value: <static><entry name='test'><member>abc</member><member>xyz</member></entry></static>
              - commit: True

    """
    ret = _default_ret(name)

    # Verify if the current XPATH is equal to the specified value.
    # If we are equal, no changes required.
    xpath_split = xpath.split("/")

    # Retrieve the head of the xpath for validation.
    if xpath_split:
        head = xpath_split[-1]
        if "[" in head:
            head = head.split("[")[0]

    current_element = __salt__["panos.get_xpath"](xpath)["result"]

    if head and current_element and head in current_element:
        current_element = current_element[head]
    else:
        current_element = {}

    new_element = xml.to_dict(ET.fromstring(value), True)

    if current_element == new_element:
        ret.update({
            "comment": "XPATH is already equal to the specified value.",
            "result": True,
        })
        return ret

    result, msg = _edit_config(xpath, value)

    ret.update({"comment": msg, "result": result})

    if not result:
        return ret

    if commit is True:
        ret.update({
            "changes": {
                "before": current_element,
                "after": new_element
            },
            "commit": __salt__["panos.commit"](),
            "result": True,
        })
    else:
        ret.update({
            "changes": {
                "before": current_element,
                "after": new_element
            },
            "result": True,
        })

    return ret
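
Before comparing, the state derives a "head" tag from the last xpath segment so it can index into the dict returned by panos.get_xpath. A minimal sketch of that extraction, using the xpath from the SLS example above:

# Illustrative xpath, matching the SLS example above
xpath = "/config/devices/entry/vsys/entry[@name='vsys1']/address-group/entry[@name='test']"

head = xpath.split("/")[-1]    # "entry[@name='test']"
if "[" in head:
    head = head.split("[")[0]  # "entry"
print(head)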
Example #55
0
def service_exists(
    name,
    servicename=None,
    vsys=1,
    protocol=None,
    port=None,
    description=None,
    commit=False,
):
    """
    Ensures that a service object exists in the configured state. If it does not exist or is not configured with the
    specified attributes, it will be adjusted to match the specified values.

    name: The name of the module function to execute.

    servicename(str): The name of the service object. The name is case-sensitive and can have up to 31 characters,
    which can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on
    Panorama, unique within its device group and any ancestor or descendant device groups.

    vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.

    protocol(str): The protocol that is used by the service object. The only valid options are tcp and udp.

    port(str): The port number that is used by the service object. This can be specified as a single integer or a
    valid range of ports.

    description(str): A description for the policy (up to 255 characters).

    commit(bool): If True, the firewall will commit the changes; if False, the changes will not be committed.

    SLS Example:

    .. code-block:: yaml

        panos/service/tcp-80:
            panos.service_exists:
              - servicename: tcp-80
              - vsys: 1
              - protocol: tcp
              - port: 80
              - description: Hypertext Transfer Protocol
              - commit: False

        panos/service/udp-500-550:
            panos.service_exists:
              - servicename: udp-500-550
              - vsys: 3
              - protocol: udp
              - port: 500-550
              - commit: False

    """
    ret = _default_ret(name)

    if not servicename:
        ret.update({"comment": "The service name field must be provided."})
        return ret

    # Check if service object currently exists
    service = __salt__["panos.get_service"](servicename, vsys)["result"]

    if service and "entry" in service:
        service = service["entry"]
    else:
        service = {}

    # Verify the arguments
    if not protocol or protocol not in ["tcp", "udp"]:
        ret.update({
            "comment":
            "The protocol must be provided and must be tcp or udp."
        })
        return ret
    if not port:
        ret.update({"comment": "The port field must be provided."})
        return ret

    element = "<protocol><{0}><port>{1}</port></{0}></protocol>".format(
        protocol, port)

    if description:
        element += "<description>{0}</description>".format(description)

    full_element = "<entry name='{0}'>{1}</entry>".format(servicename, element)

    new_service = xml.to_dict(ET.fromstring(full_element), True)

    if service == new_service:
        ret.update({
            "comment": "Service object already exists. No changes required.",
            "result": True,
        })
        return ret
    else:
        xpath = (
            "/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{0}']/service/"
            "entry[@name='{1}']".format(vsys, servicename))

        result, msg = _edit_config(xpath, full_element)

        if not result:
            ret.update({"comment": msg})
            return ret

    if commit is True:
        ret.update({
            "changes": {
                "before": service,
                "after": new_service
            },
            "commit": __salt__["panos.commit"](),
            "comment": "Service object successfully configured.",
            "result": True,
        })
    else:
        ret.update({
            "changes": {
                "before": service,
                "after": new_service
            },
            "comment": "Service object successfully configured.",
            "result": True,
        })

    return ret
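
The protocol and port are rendered by reusing the same placeholder for the opening and closing tags before the entry is parsed back for comparison. A tiny round-trip sketch with the values from the first SLS example above:

import xml.etree.ElementTree as ET

protocol, port = "tcp", "80"  # illustrative values from the SLS example above
element = "<protocol><{0}><port>{1}</port></{0}></protocol>".format(protocol, port)
full_element = "<entry name='{0}'>{1}</entry>".format("tcp-80", element)

entry = ET.fromstring(full_element)
print(entry.find("./protocol/tcp/port").text)  # 80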
Example #56
0
def query(url,
          method="GET",
          params=None,
          data=None,
          data_file=None,
          header_dict=None,
          header_list=None,
          header_file=None,
          username=None,
          password=None,
          auth=None,
          decode=False,
          decode_type="auto",
          status=False,
          headers=False,
          text=False,
          cookies=None,
          cookie_jar=None,
          cookie_format="lwp",
          persist_session=False,
          session_cookie_jar=None,
          data_render=False,
          data_renderer=None,
          header_render=False,
          header_renderer=None,
          template_dict=None,
          test=False,
          test_url=None,
          node="minion",
          port=80,
          opts=None,
          backend=None,
          ca_bundle=None,
          verify_ssl=None,
          cert=None,
          text_out=None,
          headers_out=None,
          decode_out=None,
          stream=False,
          streaming_callback=None,
          header_callback=None,
          handle=False,
          agent=USERAGENT,
          hide_fields=None,
          raise_error=True,
          formdata=False,
          formdata_fieldname=None,
          formdata_filename=None,
          decode_body=True,
          **kwargs):
    """
    Query a resource, and decode the return data
    """
    ret = {}

    if opts is None:
        if node == "master":
            opts = salt.config.master_config(
                os.path.join(salt.syspaths.CONFIG_DIR, "master"))
        elif node == "minion":
            opts = salt.config.minion_config(
                os.path.join(salt.syspaths.CONFIG_DIR, "minion"))
        else:
            opts = {}

    if not backend:
        backend = opts.get("backend", "tornado")

    match = re.match(
        r"https?://((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)($|/)",
        url,
    )
    if not match:
        salt.utils.network.refresh_dns()

    if backend == "requests":
        if HAS_REQUESTS is False:
            ret["error"] = ("http.query has been set to use requests, but the "
                            "requests library does not seem to be installed")
            log.error(ret["error"])
            return ret
        else:
            requests_log = logging.getLogger("requests")
            requests_log.setLevel(logging.WARNING)

    # Some libraries don't support separation of url and GET parameters
    # Don't need a try/except block, since Salt depends on tornado
    url_full = salt.ext.tornado.httputil.url_concat(url,
                                                    params) if params else url

    if ca_bundle is None:
        ca_bundle = get_ca_bundle(opts)

    if verify_ssl is None:
        verify_ssl = opts.get("verify_ssl", True)

    if cert is None:
        cert = opts.get("cert", None)

    if data_file is not None:
        data = _render(data_file, data_render, data_renderer, template_dict,
                       opts)

    # Make sure no secret fields show up in logs
    log_url = sanitize_url(url_full, hide_fields)

    log.debug("Requesting URL %s using %s method", log_url, method)
    log.debug("Using backend: %s", backend)

    if method == "POST" and log.isEnabledFor(logging.TRACE):
        # Make sure no secret fields show up in logs
        if isinstance(data, dict):
            log_data = data.copy()
            if isinstance(hide_fields, list):
                for item in data:
                    for field in hide_fields:
                        if item == field:
                            log_data[item] = "XXXXXXXXXX"
            log.trace("Request POST Data: %s", pprint.pformat(log_data))
        else:
            log.trace("Request POST Data: %s", pprint.pformat(data))

    if header_file is not None:
        header_tpl = _render(header_file, header_render, header_renderer,
                             template_dict, opts)
        if isinstance(header_tpl, dict):
            header_dict = header_tpl
        else:
            header_list = header_tpl.splitlines()

    if header_dict is None:
        header_dict = {}

    if header_list is None:
        header_list = []

    if cookie_jar is None:
        cookie_jar = os.path.join(
            opts.get("cachedir", salt.syspaths.CACHE_DIR), "cookies.txt")
    if session_cookie_jar is None:
        session_cookie_jar = os.path.join(
            opts.get("cachedir", salt.syspaths.CACHE_DIR), "cookies.session.p")

    if persist_session is True and salt.utils.msgpack.HAS_MSGPACK:
        # TODO: This is hackish; it will overwrite the session cookie jar with
        # all cookies from this one connection, rather than behaving like a
        # proper cookie jar. Unfortunately, since session cookies do not
        # contain expirations, they can't be stored in a proper cookie jar.
        if os.path.isfile(session_cookie_jar):
            with salt.utils.files.fopen(session_cookie_jar, "rb") as fh_:
                session_cookies = salt.utils.msgpack.load(fh_)
            if isinstance(session_cookies, dict):
                header_dict.update(session_cookies)
        else:
            with salt.utils.files.fopen(session_cookie_jar, "wb") as fh_:
                salt.utils.msgpack.dump("", fh_)

    for header in header_list:
        comps = header.split(":")
        if len(comps) < 2:
            continue
        header_dict[comps[0].strip()] = comps[1].strip()

    if not auth:
        if username and password:
            auth = (username, password)

    if agent == USERAGENT:
        agent = "{0} http.query()".format(agent)
    header_dict["User-agent"] = agent

    if backend == "requests":
        sess = requests.Session()
        sess.auth = auth
        sess.headers.update(header_dict)
        log.trace("Request Headers: %s", sess.headers)
        sess_cookies = sess.cookies
        sess.verify = verify_ssl
    elif backend == "urllib2":
        sess_cookies = None
    else:
        # Tornado
        sess_cookies = None

    if cookies is not None:
        if cookie_format == "mozilla":
            sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(
                cookie_jar)
        else:
            sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(
                cookie_jar)
        if not os.path.isfile(cookie_jar):
            sess_cookies.save()
        sess_cookies.load()

    if test is True:
        if test_url is None:
            return {}
        else:
            url = test_url
            ret["test"] = True

    if backend == "requests":
        req_kwargs = {}
        if stream is True:
            if requests.__version__[0] == "0":
                # 'stream' was called 'prefetch' before 1.0, with flipped meaning
                req_kwargs["prefetch"] = False
            else:
                req_kwargs["stream"] = True

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs["cert"] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs["cert"] = cert
            else:
                log.error(
                    "The client-side certificate path that"
                    " was passed is not valid: %s",
                    cert,
                )

        if formdata:
            if not formdata_fieldname:
                ret["error"] = "formdata_fieldname is required when formdata=True"
                log.error(ret["error"])
                return ret
            result = sess.request(method,
                                  url,
                                  params=params,
                                  files={
                                      formdata_fieldname:
                                      (formdata_filename, StringIO(data))
                                  },
                                  **req_kwargs)
        else:
            result = sess.request(method,
                                  url,
                                  params=params,
                                  data=data,
                                  **req_kwargs)
        result.raise_for_status()
        if stream is True:
            # fake a HTTP response header
            header_callback("HTTP/1.0 {0} MESSAGE".format(result.status_code))
            # fake streaming the content
            streaming_callback(result.content)
            return {
                "handle": result,
            }

        if handle is True:
            return {
                "handle": result,
                "body": result.content,
            }

        log.debug("Final URL location of Response: %s",
                  sanitize_url(result.url, hide_fields))

        result_status_code = result.status_code
        result_headers = result.headers
        result_text = result.content
        result_cookies = result.cookies
        body = result.content
        if not isinstance(body, six.text_type) and decode_body:
            body = body.decode(result.encoding or "utf-8")
        ret["body"] = body
    elif backend == "urllib2":
        request = urllib_request.Request(url_full, data)
        handlers = [
            urllib_request.HTTPHandler,
            urllib_request.HTTPCookieProcessor(sess_cookies),
        ]

        if url.startswith("https"):
            hostname = request.get_host()
            handlers[0] = urllib_request.HTTPSHandler(1)
            if not HAS_MATCHHOSTNAME:
                log.warning(
                    "match_hostname() not available, SSL hostname checking "
                    "not available. THIS CONNECTION MAY NOT BE SECURE!")
            elif verify_ssl is False:
                log.warning("SSL certificate verification has been explicitly "
                            "disabled. THIS CONNECTION MAY NOT BE SECURE!")
            else:
                if ":" in hostname:
                    hostname, port = hostname.split(":")
                else:
                    port = 443
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((hostname, int(port)))
                sockwrap = ssl.wrap_socket(sock,
                                           ca_certs=ca_bundle,
                                           cert_reqs=ssl.CERT_REQUIRED)
                try:
                    match_hostname(sockwrap.getpeercert(), hostname)
                except CertificateError as exc:
                    ret["error"] = (
                        "The certificate was invalid. Error returned was: %s",
                        pprint.pformat(exc),
                    )
                    return ret

                # Client-side cert handling
                if cert is not None:
                    cert_chain = None
                    if isinstance(cert, six.string_types):
                        if os.path.exists(cert):
                            cert_chain = cert
                    elif isinstance(cert, list):
                        if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                            cert_chain = cert
                    else:
                        log.error(
                            "The client-side certificate path that was "
                            "passed is not valid: %s",
                            cert,
                        )
                        return
                    if hasattr(ssl, "SSLContext"):
                        # Python >= 2.7.9
                        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                        if isinstance(cert_chain, (list, tuple)):
                            context.load_cert_chain(*cert_chain)
                        else:
                            context.load_cert_chain(cert_chain)
                        handlers.append(
                            urllib_request.HTTPSHandler(context=context))  # pylint: disable=E1123
                    else:
                        # Python < 2.7.9
                        cert_kwargs = {
                            "host": request.get_host(),
                            "port": port,
                            "cert_file": cert_chain[0],
                        }
                        if len(cert_chain) > 1:
                            cert_kwargs["key_file"] = cert_chain[1]
                        handlers[
                            0] = salt.ext.six.moves.http_client.HTTPSConnection(
                                **cert_kwargs)

        opener = urllib_request.build_opener(*handlers)
        for header in header_dict:
            request.add_header(header, header_dict[header])
        request.get_method = lambda: method
        try:
            result = opener.open(request)
        except URLError as exc:
            return {"Error": six.text_type(exc)}
        if stream is True or handle is True:
            return {
                "handle": result,
                "body": result.content,
            }

        result_status_code = result.code
        result_headers = dict(result.info())
        result_text = result.read()
        if "Content-Type" in result_headers:
            res_content_type, res_params = cgi.parse_header(
                result_headers["Content-Type"])
            if (res_content_type.startswith("text/")
                    and "charset" in res_params
                    and not isinstance(result_text, six.text_type)):
                result_text = result_text.decode(res_params["charset"])
        if six.PY3 and isinstance(result_text, bytes) and decode_body:
            result_text = result_text.decode("utf-8")
        ret["body"] = result_text
    else:
        # Tornado
        req_kwargs = {}

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs["client_cert"] = cert
            elif isinstance(cert, list):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs["client_cert"] = cert[0]
                    req_kwargs["client_key"] = cert[1]
            else:
                log.error(
                    "The client-side certificate path that "
                    "was passed is not valid: %s",
                    cert,
                )

        if isinstance(data, dict):
            data = _urlencode(data)

        if verify_ssl:
            req_kwargs["ca_certs"] = ca_bundle

        max_body = opts.get("http_max_body",
                            salt.config.DEFAULT_MINION_OPTS["http_max_body"])
        connect_timeout = opts.get(
            "http_connect_timeout",
            salt.config.DEFAULT_MINION_OPTS["http_connect_timeout"],
        )
        timeout = opts.get(
            "http_request_timeout",
            salt.config.DEFAULT_MINION_OPTS["http_request_timeout"],
        )

        client_argspec = None

        proxy_host = opts.get("proxy_host", None)
        if proxy_host:
            # tornado requires a str for proxy_host, cannot be a unicode str in py2
            proxy_host = salt.utils.stringutils.to_str(proxy_host)
        proxy_port = opts.get("proxy_port", None)
        proxy_username = opts.get("proxy_username", None)
        if proxy_username:
            # tornado requires a str, cannot be unicode str in py2
            proxy_username = salt.utils.stringutils.to_str(proxy_username)
        proxy_password = opts.get("proxy_password", None)
        if proxy_password:
            # tornado requires a str, cannot be unicode str in py2
            proxy_password = salt.utils.stringutils.to_str(proxy_password)
        no_proxy = opts.get("no_proxy", [])

        # Since tornado doesn't support no_proxy, we'll always hand it empty proxies or valid ones;
        # we remove the valid ones when the URL's hostname is listed in no_proxy
        if urlparse(url_full).hostname in no_proxy:
            proxy_host = None
            proxy_port = None

        # We want to use curl_http if we have a proxy defined
        if proxy_host and proxy_port:
            if HAS_CURL_HTTPCLIENT is False:
                ret["error"] = (
                    "proxy_host and proxy_port has been set. This requires pycurl and tornado, "
                    "but the libraries does not seem to be installed")
                log.error(ret["error"])
                return ret

            salt.ext.tornado.httpclient.AsyncHTTPClient.configure(
                "tornado.curl_httpclient.CurlAsyncHTTPClient")
            client_argspec = salt.utils.args.get_function_argspec(
                salt.ext.tornado.curl_httpclient.CurlAsyncHTTPClient.initialize
            )
        else:
            client_argspec = salt.utils.args.get_function_argspec(
                salt.ext.tornado.simple_httpclient.SimpleAsyncHTTPClient.
                initialize)

        supports_max_body_size = "max_body_size" in client_argspec.args

        req_kwargs.update({
            "method": method,
            "headers": header_dict,
            "auth_username": username,
            "auth_password": password,
            "body": data,
            "validate_cert": verify_ssl,
            "allow_nonstandard_methods": True,
            "streaming_callback": streaming_callback,
            "header_callback": header_callback,
            "connect_timeout": connect_timeout,
            "request_timeout": timeout,
            "proxy_host": proxy_host,
            "proxy_port": proxy_port,
            "proxy_username": proxy_username,
            "proxy_password": proxy_password,
            "raise_error": raise_error,
            "decompress_response": False,
        })

        # Unicode types will cause a TypeError when Tornado's curl HTTPClient
        # invokes setopt. Therefore, make sure all arguments we pass which
        # contain strings are str types.
        req_kwargs = salt.utils.data.decode(req_kwargs, to_str=True)

        try:
            download_client = (HTTPClient(max_body_size=max_body)
                               if supports_max_body_size else HTTPClient())
            result = download_client.fetch(url_full, **req_kwargs)
        except salt.ext.tornado.httpclient.HTTPError as exc:
            ret["status"] = exc.code
            ret["error"] = six.text_type(exc)
            return ret
        except (socket.herror, socket.error, socket.timeout,
                socket.gaierror) as exc:
            if status is True:
                ret["status"] = 0
            ret["error"] = six.text_type(exc)
            log.debug("Cannot perform 'http.query': {0} - {1}".format(
                url_full, ret["error"]))
            return ret

        if stream is True or handle is True:
            return {
                "handle": result,
                "body": result.body,
            }

        result_status_code = result.code
        result_headers = result.headers
        result_text = result.body
        if "Content-Type" in result_headers:
            res_content_type, res_params = cgi.parse_header(
                result_headers["Content-Type"])
            if (res_content_type.startswith("text/")
                    and "charset" in res_params
                    and not isinstance(result_text, six.text_type)):
                result_text = result_text.decode(res_params["charset"])
        if six.PY3 and isinstance(result_text, bytes) and decode_body:
            result_text = result_text.decode("utf-8")
        ret["body"] = result_text
        if "Set-Cookie" in result_headers and cookies is not None:
            result_cookies = parse_cookie_header(result_headers["Set-Cookie"])
            for item in result_cookies:
                sess_cookies.set_cookie(item)
        else:
            result_cookies = None

    if isinstance(result_headers, list):
        result_headers_dict = {}
        for header in result_headers:
            comps = header.split(":")
            result_headers_dict[comps[0].strip()] = ":".join(comps[1:]).strip()
        result_headers = result_headers_dict

    log.debug("Response Status Code: %s", result_status_code)
    log.trace("Response Headers: %s", result_headers)
    log.trace("Response Cookies: %s", sess_cookies)
    # log.trace("Content: %s", result_text)

    coding = result_headers.get("Content-Encoding", "identity")

    # Requests will always decompress the content, and working around that is annoying.
    if backend != "requests":
        result_text = __decompressContent(coding, result_text)

    try:
        log.trace("Response Text: %s", result_text)
    except UnicodeEncodeError as exc:
        log.trace(
            "Cannot Trace Log Response Text: %s. This may be due to "
            "incompatibilities between requests and logging.",
            exc,
        )

    if text_out is not None:
        with salt.utils.files.fopen(text_out, "w") as tof:
            tof.write(result_text)

    if headers_out is not None and os.path.exists(headers_out):
        with salt.utils.files.fopen(headers_out, "w") as hof:
            hof.write(result_headers)

    if cookies is not None:
        sess_cookies.save()

    if persist_session is True and salt.utils.msgpack.HAS_MSGPACK:
        # TODO: See persist_session above
        if "set-cookie" in result_headers:
            with salt.utils.files.fopen(session_cookie_jar, "wb") as fh_:
                session_cookies = result_headers.get("set-cookie", None)
                if session_cookies is not None:
                    salt.utils.msgpack.dump({"Cookie": session_cookies}, fh_)
                else:
                    salt.utils.msgpack.dump("", fh_)

    if status is True:
        ret["status"] = result_status_code

    if headers is True:
        ret["headers"] = result_headers

    if decode is True:
        if decode_type == "auto":
            content_type = result_headers.get("content-type",
                                              "application/json")
            if "xml" in content_type:
                decode_type = "xml"
            elif "json" in content_type:
                decode_type = "json"
            elif "yaml" in content_type:
                decode_type = "yaml"
            else:
                decode_type = "plain"

        valid_decodes = ("json", "xml", "yaml", "plain")
        if decode_type not in valid_decodes:
            ret["error"] = ("Invalid decode_type specified. "
                            "Valid decode types are: {0}".format(
                                pprint.pformat(valid_decodes)))
            log.error(ret["error"])
            return ret

        if decode_type == "json":
            ret["dict"] = salt.utils.json.loads(result_text)
        elif decode_type == "xml":
            ret["dict"] = []
            items = ET.fromstring(result_text)
            for item in items:
                ret["dict"].append(xml.to_dict(item))
        elif decode_type == "yaml":
            ret["dict"] = salt.utils.data.decode(
                salt.utils.yaml.safe_load(result_text))
        else:
            text = True

        if decode_out:
            with salt.utils.files.fopen(decode_out, "w") as dof:
                dof.write(result_text)

    if text is True:
        ret["text"] = result_text

    return ret
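
As a usage note, a hedged example of calling this query() helper with automatic decoding enabled; the URL is a placeholder, and the keyword arguments and return keys all come from the function above:

# Assumes the query() defined above (Salt's HTTP utility) is in scope.
# 'https://api.example.com/v1/items' is a placeholder URL.
result = query(
    "https://api.example.com/v1/items",
    method="GET",
    params={"page": "1"},
    decode=True,          # parse the body according to its Content-Type
    decode_type="auto",
    status=True,          # include the HTTP status code in the return dict
    headers=True,         # include the response headers as well
    backend="requests",   # or "tornado" (default) / "urllib2"
)
# result may then contain 'status', 'headers', 'dict' (decoded body) and 'body'.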
Example #57
0
File: http.py Project: iquaba/salt
def query(url,
          method='GET',
          params=None,
          data=None,
          data_file=None,
          header_dict=None,
          header_list=None,
          header_file=None,
          username=None,
          password=None,
          auth=None,
          decode=False,
          decode_type='auto',
          status=False,
          headers=False,
          text=False,
          cookies=None,
          cookie_jar=None,
          cookie_format='lwp',
          persist_session=False,
          session_cookie_jar=None,
          data_render=False,
          data_renderer=None,
          header_render=False,
          header_renderer=None,
          template_dict=None,
          test=False,
          test_url=None,
          node='minion',
          port=80,
          opts=None,
          backend='tornado',
          requests_lib=None,
          ca_bundle=None,
          verify_ssl=None,
          cert=None,
          text_out=None,
          headers_out=None,
          decode_out=None,
          stream=False,
          streaming_callback=None,
          handle=False,
          agent=USERAGENT,
          hide_fields=None,
          **kwargs):
    '''
    Query a resource, and decode the return data
    '''
    ret = {}

    if opts is None:
        if node == 'master':
            opts = salt.config.master_config(
                os.path.join(syspaths.CONFIG_DIR, 'master')
            )
        elif node == 'minion':
            opts = salt.config.minion_config(
                os.path.join(syspaths.CONFIG_DIR, 'minion')
            )
        else:
            opts = {}

    if requests_lib is None:
        requests_lib = opts.get('requests_lib', False)

    if requests_lib is True:
        log.warn('Please set "backend" to "requests" instead of setting '
                 '"requests_lib" to "True"')

        if HAS_REQUESTS is False:
            ret['error'] = ('http.query has been set to use requests, but the '
                            'requests library does not seem to be installed')
            log.error(ret['error'])
            return ret

        backend = 'requests'

    else:
        requests_log = logging.getLogger('requests')
        requests_log.setLevel(logging.WARNING)

    # Some libraries don't support separation of url and GET parameters
    # Don't need a try/except block, since Salt depends on tornado
    url_full = tornado.httputil.url_concat(url, params)

    if ca_bundle is None:
        ca_bundle = get_ca_bundle(opts)

    if verify_ssl is None:
        verify_ssl = opts.get('verify_ssl', True)

    if cert is None:
        cert = opts.get('cert', None)

    if data_file is not None:
        data = _render(
            data_file, data_render, data_renderer, template_dict, opts
        )

    # Make sure no secret fields show up in logs
    log_url = sanitize_url(url_full, hide_fields)

    log.debug('Requesting URL {0} using {1} method'.format(log_url, method))
    if method == 'POST':
        # Make sure no secret fields show up in logs
        if isinstance(data, dict):
            log_data = data.copy()
            if isinstance(hide_fields, list):
                for item in data:
                    for field in hide_fields:
                        if item == field:
                            log_data[item] = 'XXXXXXXXXX'
            log.trace('Request POST Data: {0}'.format(pprint.pformat(log_data)))
        else:
            log.trace('Request POST Data: {0}'.format(pprint.pformat(data)))

    if header_file is not None:
        header_tpl = _render(
            header_file, header_render, header_renderer, template_dict, opts
        )
        if isinstance(header_tpl, dict):
            header_dict = header_tpl
        else:
            header_list = header_tpl.splitlines()

    if header_dict is None:
        header_dict = {}

    if header_list is None:
        header_list = []

    if cookie_jar is None:
        cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.txt')
    if session_cookie_jar is None:
        session_cookie_jar = os.path.join(opts.get('cachedir', syspaths.CACHE_DIR), 'cookies.session.p')

    if persist_session is True and HAS_MSGPACK:
        # TODO: This is hackish; it will overwrite the session cookie jar with
        # all cookies from this one connection, rather than behaving like a
        # proper cookie jar. Unfortunately, since session cookies do not
        # contain expirations, they can't be stored in a proper cookie jar.
        if os.path.isfile(session_cookie_jar):
            with salt.utils.fopen(session_cookie_jar, 'rb') as fh_:
                session_cookies = msgpack.load(fh_)
            if isinstance(session_cookies, dict):
                header_dict.update(session_cookies)
        else:
            with salt.utils.fopen(session_cookie_jar, 'wb') as fh_:
                msgpack.dump('', fh_)

    for header in header_list:
        comps = header.split(':')
        if len(comps) < 2:
            continue
        header_dict[comps[0].strip()] = comps[1].strip()

    if not auth:
        if username and password:
            auth = (username, password)

    if agent == USERAGENT:
        agent = '{0} http.query()'.format(agent)
    header_dict['User-agent'] = agent

    if backend == 'requests':
        sess = requests.Session()
        sess.auth = auth
        sess.headers.update(header_dict)
        log.trace('Request Headers: {0}'.format(sess.headers))
        sess_cookies = sess.cookies
        sess.verify = verify_ssl
    elif backend == 'urllib2':
        sess_cookies = None
    else:
        # Tornado
        sess_cookies = None

    if cookies is not None:
        if cookie_format == 'mozilla':
            sess_cookies = salt.ext.six.moves.http_cookiejar.MozillaCookieJar(cookie_jar)
        else:
            sess_cookies = salt.ext.six.moves.http_cookiejar.LWPCookieJar(cookie_jar)
        if not os.path.isfile(cookie_jar):
            sess_cookies.save()
        sess_cookies.load()

    if test is True:
        if test_url is None:
            return {}
        else:
            url = test_url
            ret['test'] = True

    if backend == 'requests':
        req_kwargs = {}
        if stream is True:
            if requests.__version__[0] == '0':
                # 'stream' was called 'prefetch' before 1.0, with flipped meaning
                req_kwargs['prefetch'] = False
            else:
                req_kwargs['stream'] = True

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['cert'] = cert
            elif isinstance(cert, tuple):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['cert'] = cert
            else:
                log.error('The client-side certificate path that was passed is '
                          'not valid: {0}'.format(cert))

        result = sess.request(
            method, url, params=params, data=data, **req_kwargs
        )
        result.raise_for_status()
        if stream is True or handle is True:
            return {
                'handle': result,
                'body': result.content,
            }

        log.debug('Final URL location of Response: {0}'.format(sanitize_url(result.url, hide_fields)))

        result_status_code = result.status_code
        result_headers = result.headers
        result_text = result.content
        result_cookies = result.cookies
        ret['body'] = result.content
    elif backend == 'urllib2':
        request = urllib_request.Request(url_full, data)
        handlers = [
            urllib_request.HTTPHandler,
            urllib_request.HTTPCookieProcessor(sess_cookies)
        ]

        if url.startswith('https'):
            hostname = request.get_host()
            handlers[0] = urllib_request.HTTPSHandler(1)
            if not HAS_MATCHHOSTNAME:
                log.warn(('match_hostname() not available, SSL hostname checking '
                         'not available. THIS CONNECTION MAY NOT BE SECURE!'))
            elif verify_ssl is False:
                log.warn(('SSL certificate verification has been explicitly '
                         'disabled. THIS CONNECTION MAY NOT BE SECURE!'))
            else:
                if ':' in hostname:
                    hostname, port = hostname.split(':')
                else:
                    port = 443
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock.connect((hostname, int(port)))
                sockwrap = ssl.wrap_socket(
                    sock,
                    ca_certs=ca_bundle,
                    cert_reqs=ssl.CERT_REQUIRED
                )
                try:
                    match_hostname(sockwrap.getpeercert(), hostname)
                except CertificateError as exc:
                    ret['error'] = (
                        'The certificate was invalid. '
                        'Error returned was: {0}'.format(
                            pprint.pformat(exc)
                        )
                    )
                    return ret

                # Client-side cert handling
                if cert is not None:
                    cert_chain = None
                    if isinstance(cert, six.string_types):
                        if os.path.exists(cert):
                            cert_chain = (cert)
                    elif isinstance(cert, tuple):
                        if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                            cert_chain = cert
                    else:
                        log.error('The client-side certificate path that was '
                                  'passed is not valid: {0}'.format(cert))
                        return
                    if hasattr(ssl, 'SSLContext'):
                        # Python >= 2.7.9
                        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                        if isinstance(cert_chain, tuple):
                            context.load_cert_chain(*cert_chain)
                        else:
                            context.load_cert_chain(cert_chain)
                        handlers.append(urllib_request.HTTPSHandler(context=context))  # pylint: disable=E1123
                    else:
                        # Python < 2.7.9
                        cert_kwargs = {
                            'host': request.get_host(),
                            'port': port,
                            'cert_file': cert_chain[0]
                        }
                        if len(cert_chain) > 1:
                            cert_kwargs['key_file'] = cert_chain[1]
                        handlers[0] = salt.ext.six.moves.http_client.HTTPSConnection(**cert_kwargs)

        opener = urllib_request.build_opener(*handlers)
        for header in header_dict:
            request.add_header(header, header_dict[header])
        request.get_method = lambda: method
        try:
            result = opener.open(request)
        except URLError as exc:
            return {'Error': str(exc)}
        if stream is True or handle is True:
            return {
                'handle': result,
                'body': result.content,
            }

        result_status_code = result.code
        result_headers = result.headers.headers
        result_text = result.read()
        ret['body'] = result_text
    else:
        # Tornado
        req_kwargs = {}

        # Client-side cert handling
        if cert is not None:
            if isinstance(cert, six.string_types):
                if os.path.exists(cert):
                    req_kwargs['client_cert'] = cert
            elif isinstance(cert, tuple):
                if os.path.exists(cert[0]) and os.path.exists(cert[1]):
                    req_kwargs['client_cert'] = cert[0]
                    req_kwargs['client_key'] = cert[1]
            else:
                log.error('The client-side certificate path that was passed is '
                          'not valid: {0}'.format(cert))

        if isinstance(data, dict):
            data = urllib.urlencode(data)

        if verify_ssl:
            req_kwargs['ca_certs'] = ca_bundle

        max_body = opts.get('http_max_body', salt.config.DEFAULT_MINION_OPTS['http_max_body'])
        timeout = opts.get('http_request_timeout', salt.config.DEFAULT_MINION_OPTS['http_request_timeout'])

        client_argspec = None

        proxy_host = opts.get('proxy_host', None)
        proxy_port = opts.get('proxy_port', None)
        proxy_username = opts.get('proxy_username', None)
        proxy_password = opts.get('proxy_password', None)

        # We want to use curl_http if we have a proxy defined
        if proxy_host and proxy_port:
            if HAS_CURL_HTTPCLIENT is False:
                ret['error'] = ('proxy_host and proxy_port have been set. This requires pycurl, but the '
                                'pycurl library does not seem to be installed')
                log.error(ret['error'])
                return ret

            tornado.httpclient.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
            client_argspec = inspect.getargspec(tornado.curl_httpclient.CurlAsyncHTTPClient.initialize)
        else:
            client_argspec = inspect.getargspec(tornado.simple_httpclient.SimpleAsyncHTTPClient.initialize)

        supports_max_body_size = 'max_body_size' in client_argspec.args

        try:
            if supports_max_body_size:
                result = HTTPClient(max_body_size=max_body).fetch(
                    url_full,
                    method=method,
                    headers=header_dict,
                    auth_username=username,
                    auth_password=password,
                    body=data,
                    validate_cert=verify_ssl,
                    allow_nonstandard_methods=True,
                    streaming_callback=streaming_callback,
                    request_timeout=timeout,
                    proxy_host=proxy_host,
                    proxy_port=proxy_port,
                    proxy_username=proxy_username,
                    proxy_password=proxy_password,
                    **req_kwargs
                )
            else:
                result = HTTPClient().fetch(
                    url_full,
                    method=method,
                    headers=header_dict,
                    auth_username=username,
                    auth_password=password,
                    body=data,
                    validate_cert=verify_ssl,
                    allow_nonstandard_methods=True,
                    streaming_callback=streaming_callback,
                    request_timeout=timeout,
                    proxy_host=proxy_host,
                    proxy_port=proxy_port,
                    proxy_username=proxy_username,
                    proxy_password=proxy_password,
                    **req_kwargs
                )
        except tornado.httpclient.HTTPError as exc:
            ret['status'] = exc.code
            ret['error'] = str(exc)
            return ret

        if stream is True or handle is True:
            return {
                'handle': result,
                'body': result.body,
            }

        result_status_code = result.code
        result_headers = result.headers
        result_text = result.body
        ret['body'] = result.body
        if 'Set-Cookie' in result_headers.keys() and cookies is not None:
            result_cookies = parse_cookie_header(result_headers['Set-Cookie'])
            for item in result_cookies:
                sess_cookies.set_cookie(item)
        else:
            result_cookies = None

    if isinstance(result_headers, list):
        result_headers_dict = {}
        for header in result_headers:
            comps = header.split(':')
            result_headers_dict[comps[0].strip()] = ':'.join(comps[1:]).strip()
        result_headers = result_headers_dict

    log.debug('Response Status Code: {0}'.format(result_status_code))
    log.trace('Response Headers: {0}'.format(result_headers))
    log.trace('Response Cookies: {0}'.format(sess_cookies))
    try:
        log.trace('Response Text: {0}'.format(result_text))
    except UnicodeEncodeError as exc:
        log.trace(('Cannot Trace Log Response Text: {0}. This may be due to '
                  'incompatibilities between requests and logging.').format(exc))

    if text_out is not None and os.path.exists(text_out):
        with salt.utils.fopen(text_out, 'w') as tof:
            tof.write(result_text)

    if headers_out is not None and os.path.exists(headers_out):
        with salt.utils.fopen(headers_out, 'w') as hof:
            hof.write(result_headers)

    if cookies is not None:
        sess_cookies.save()

    if persist_session is True and HAS_MSGPACK:
        # TODO: See persist_session above
        if 'set-cookie' in result_headers:
            with salt.utils.fopen(session_cookie_jar, 'wb') as fh_:
                session_cookies = result_headers.get('set-cookie', None)
                if session_cookies is not None:
                    msgpack.dump({'Cookie': session_cookies}, fh_)
                else:
                    msgpack.dump('', fh_)

    if status is True:
        ret['status'] = result_status_code

    if headers is True:
        ret['headers'] = result_headers

    if decode is True:
        if decode_type == 'auto':
            content_type = result_headers.get(
                'content-type', 'application/json'
            )
            if 'xml' in content_type:
                decode_type = 'xml'
            elif 'json' in content_type:
                decode_type = 'json'
            elif 'yaml' in content_type:
                decode_type = 'yaml'
            else:
                decode_type = 'plain'

        valid_decodes = ('json', 'xml', 'yaml', 'plain')
        if decode_type not in valid_decodes:
            ret['error'] = (
                'Invalid decode_type specified. '
                'Valid decode types are: {0}'.format(
                    pprint.pformat(valid_decodes)
                )
            )
            log.error(ret['error'])
            return ret

        if decode_type == 'json':
            ret['dict'] = json.loads(salt.utils.to_str(result_text))
        elif decode_type == 'xml':
            ret['dict'] = []
            items = ET.fromstring(result_text)
            for item in items:
                ret['dict'].append(xml.to_dict(item))
        elif decode_type == 'yaml':
            ret['dict'] = yaml.safe_load(result_text)
        else:
            text = True

        if decode_out and os.path.exists(decode_out):
            with salt.utils.fopen(decode_out, 'w') as dof:
                dof.write(result_text)

    if text is True:
        ret['text'] = result_text

    return ret
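
In both versions of query(), decode_type='auto' simply inspects the Content-Type header. A standalone sketch of that mapping (guess_decode_type is a hypothetical name, not part of the module):

def guess_decode_type(content_type):
    # Mirrors the 'auto' branch above: xml/json/yaml by substring, else plain.
    if 'xml' in content_type:
        return 'xml'
    if 'json' in content_type:
        return 'json'
    if 'yaml' in content_type:
        return 'yaml'
    return 'plain'

print(guess_decode_type('application/json; charset=utf-8'))  # json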
Example #58
0
def test_gen_xml_dns():
    """
    Test virt._gen_net_xml() with DNS configuration
    """
    xml_data = virt._gen_net_xml(
        "network",
        "main",
        "nat",
        None,
        ip_configs=[{
            "cidr":
            "192.168.2.0/24",
            "dhcp_ranges": [{
                "start": "192.168.2.10",
                "end": "192.168.2.25"
            }],
        }],
        dns={
            "forwarders": [
                {
                    "domain": "example.com",
                    "addr": "192.168.1.1"
                },
                {
                    "addr": "8.8.8.8"
                },
                {
                    "domain": "www.example.com"
                },
            ],
            "txt": {
                "host.widgets.com.": "printer=lpr5",
                "example.com.": "reserved for doc",
            },
            "hosts": {
                "192.168.1.2": ["mirror.acme.lab", "test.acme.lab"]
            },
            "srvs": [
                {
                    "name": "srv1",
                    "protocol": "tcp",
                    "domain": "test-domain-name",
                    "target": ".",
                    "port": 1024,
                    "priority": 10,
                    "weight": 10,
                },
                {
                    "name": "srv2",
                    "protocol": "udp"
                },
            ],
        },
    )
    root = ET.fromstring(xml_data)
    expected_xml = ET.fromstring("""
        <dns>
          <forwarder domain='example.com' addr='192.168.1.1'/>
          <forwarder addr='8.8.8.8'/>
          <forwarder domain='www.example.com'/>
          <txt name='example.com.' value='reserved for doc'/>
          <txt name='host.widgets.com.' value='printer=lpr5'/>
          <host ip='192.168.1.2'>
            <hostname>mirror.acme.lab</hostname>
            <hostname>test.acme.lab</hostname>
          </host>
          <srv name='srv1' protocol='tcp' port='1024' target='.' priority='10' weight='10' domain='test-domain-name'/>
          <srv name='srv2' protocol='udp'/>
        </dns>
        """)
    assert_xml_equals(expected_xml, root.find("./dns"))
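
assert_xml_equals() is not part of this excerpt; a rough, order-insensitive comparison along the following lines would be sufficient for a test like the one above. This is an illustrative stand-in, not the project's actual helper:

import xml.etree.ElementTree as ET

def elements_equal(a, b):
    # Compare tag, attributes and (stripped) text, then recurse into the
    # children of both elements with child order normalized.
    if a.tag != b.tag or a.attrib != b.attrib:
        return False
    if (a.text or "").strip() != (b.text or "").strip():
        return False
    if len(a) != len(b):
        return False
    def key(elem):
        return (elem.tag, sorted(elem.attrib.items()))
    return all(elements_equal(x, y)
               for x, y in zip(sorted(a, key=key), sorted(b, key=key)))

# elements_equal(expected_xml, root.find("./dns")) could then back the assertion above.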
Example #59
0
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path=None, return_bin=False, action=None, local_file=None,
          verify_ssl=True):
    '''
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    A service_url may also be specified in the configuration::

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    SSL verification may also be turned off in the configuration::

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.
    '''
    if not headers:
        headers = {}

    if not params:
        params = {}

    if path is None:
        path = ''

    if not service_url:
        service_url = 's3.amazonaws.com'

    if bucket:
        endpoint = '{0}.{1}'.format(bucket, service_url)
    else:
        endpoint = service_url

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    token = None
    if not key or not keyid:
        iam_creds = iam.get_iam_metadata()
        key = iam_creds['secret_key']
        keyid = iam_creds['access_key']
        token = iam_creds['security_token']

    if not requesturl:
        x_amz_date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
        content_type = 'text/plain'
        if method == 'GET':
            if bucket:
                can_resource = '/{0}/{1}'.format(bucket, path)
            else:
                can_resource = '/'
        elif method == 'PUT' or method == 'HEAD' or method == 'DELETE':
            if path:
                can_resource = '/{0}/{1}'.format(bucket, path)
            else:
                can_resource = '/{0}/'.format(bucket)

        if action:
            can_resource += '?{0}'.format(action)

        log.debug('CanonicalizedResource: {0}'.format(can_resource))

        headers['Host'] = endpoint
        headers['Content-type'] = content_type
        headers['Date'] = x_amz_date
        if token:
            headers['x-amz-security-token'] = token

        string_to_sign = '{0}\n'.format(method)

        new_headers = []
        for header in sorted(headers):
            if header.lower().startswith('x-amz'):
                log.debug(header.lower())
                new_headers.append('{0}:{1}'.format(header.lower(),
                                                    headers[header]))
        can_headers = '\n'.join(new_headers)
        log.debug('CanonicalizedAmzHeaders: {0}'.format(can_headers))

        string_to_sign += '\n{0}'.format(content_type)
        string_to_sign += '\n{0}'.format(x_amz_date)
        if can_headers:
            string_to_sign += '\n{0}'.format(can_headers)
        string_to_sign += '\n{0}'.format(can_resource)
        log.debug('String To Sign:: \n{0}'.format(string_to_sign))

        hashed = hmac.new(key, string_to_sign, hashlib.sha1)
        sig = binascii.b2a_base64(hashed.digest())
        headers['Authorization'] = 'AWS {0}:{1}'.format(keyid, sig.strip())

        querystring = urllib.urlencode(params)
        if action:
            if querystring:
                querystring = '{0}&{1}'.format(action, querystring)
            else:
                querystring = action
        requesturl = 'https://{0}/'.format(endpoint)
        if path:
            requesturl += path
        if querystring:
            requesturl += '?{0}'.format(querystring)

    data = None
    if method == 'PUT':
        if local_file:
            with salt.utils.fopen(local_file, 'r') as ifile:
                data = ifile.read()

    log.debug('S3 Request: {0}'.format(requesturl))
    log.debug('S3 Headers::')
    log.debug('    Authorization: {0}'.format(headers['Authorization']))

    try:
        result = requests.request(method, requesturl, headers=headers,
                                  data=data,
                                  verify=verify_ssl)
        response = result.content
    except requests.exceptions.HTTPError as exc:
        log.error('There was an error::')
        if exc.response is not None:
            log.error('    Code: {0}: {1}'.format(exc.response.status_code,
                                                  exc.response.reason))
            log.error('    Content: \n{0}'.format(exc.response.text))
        return False

    log.debug('S3 Response Status Code: {0}'.format(result.status_code))

    if method == 'PUT':
        if result.status_code == 200:
            if local_file:
                log.debug('Uploaded from {0} to {1}'.format(local_file, path))
            else:
                log.debug('Created bucket {0}'.format(bucket))
        else:
            if local_file:
                log.debug('Failed to upload from {0} to {1}: {2}'.format(
                                                    local_file,
                                                    path,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to create bucket {0}'.format(bucket))
        return

    if method == 'DELETE':
        if str(result.status_code).startswith('2'):
            if path:
                log.debug('Deleted {0} from bucket {1}'.format(path, bucket))
            else:
                log.debug('Deleted bucket {0}'.format(bucket))
        else:
            if path:
                log.debug('Failed to delete {0} from bucket {1}: {2}'.format(
                                                    path,
                                                    bucket,
                                                    result.status_code,
                                                    ))
            else:
                log.debug('Failed to delete bucket {0}'.format(bucket))
        return

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        log.debug('Saving to local file: {0}'.format(local_file))
        with salt.utils.fopen(local_file, 'wb') as out:
            out.write(response)
        return 'Saved to local file: {0}'.format(local_file)

    # This can be used to return a binary object wholesale
    if return_bin:
        return response

    if response:
        items = ET.fromstring(response)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {'headers': []}
        for header in result.headers:
            ret['headers'].append(header.strip())

    return ret
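
For reference, the Authorization value assembled above can be reproduced in
isolation. The following is a minimal sketch of the legacy S3 signing scheme
(HMAC-SHA1 over the string-to-sign, base64-encoded) for the simple case with
no x-amz-* headers; it is not the Salt function itself, and the method,
bucket, key path, and credentials are placeholder assumptions.

import base64
import datetime
import hashlib
import hmac


def s3_legacy_signature(method, bucket, path, keyid, key):
    """Return (Date, Authorization) header values for a legacy-signed S3 call."""
    date = datetime.datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
    # METHOD, empty Content-MD5 line, Content-Type, Date, CanonicalizedResource
    string_to_sign = '{0}\n\ntext/plain\n{1}\n/{2}/{3}'.format(
        method, date, bucket, path)
    digest = hmac.new(key.encode('utf-8'),
                      string_to_sign.encode('utf-8'),
                      hashlib.sha1).digest()
    signature = base64.b64encode(digest).decode('ascii')
    return date, 'AWS {0}:{1}'.format(keyid, signature)


# Placeholder usage:
# date_hdr, auth_hdr = s3_legacy_signature('GET', 'examplebucket',
#                                          'some/key.txt', 'AKIDEXAMPLE',
#                                          'examplesecret')
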
示例#60
0
def query(
    params=None,
    setname=None,
    requesturl=None,
    location=None,
    return_url=False,
    return_root=False,
    opts=None,
    provider=None,
    endpoint=None,
    product="ec2",
    sigver="2",
):
    """
    Perform a query against AWS services using Signature Version 2 Signing
    Process. This is documented at:

    http://docs.aws.amazon.com/general/latest/gr/signature-version-2.html

    Regions and endpoints are documented at:

    http://docs.aws.amazon.com/general/latest/gr/rande.html

    Default ``product`` is ``ec2``. Valid ``product`` names are:

    .. code-block:: yaml

        - autoscaling (Auto Scaling)
        - cloudformation (CloudFormation)
        - ec2 (Elastic Compute Cloud)
        - elasticache (ElastiCache)
        - elasticbeanstalk (Elastic Beanstalk)
        - elasticloadbalancing (Elastic Load Balancing)
        - elasticmapreduce (Elastic MapReduce)
        - iam (Identity and Access Management)
        - importexport (Import/Export)
        - monitoring (CloudWatch)
        - rds (Relational Database Service)
        - simpledb (SimpleDB)
        - sns (Simple Notification Service)
        - sqs (Simple Queue Service)
    """
    if params is None:
        params = {}

    if opts is None:
        opts = {}

    function = opts.get("function", (None, product))
    providers = opts.get("providers", {})

    if provider is None:
        prov_dict = providers.get(function[1], {}).get(product, {})
        if prov_dict:
            driver = list(prov_dict.keys())[0]
            provider = providers.get(driver, product)
    else:
        prov_dict = providers.get(provider, {}).get(product, {})

    service_url = prov_dict.get("service_url", "amazonaws.com")

    if not location:
        location = get_location(opts, prov_dict)

    if endpoint is None:
        if not requesturl:
            endpoint = prov_dict.get(
                "endpoint", "{0}.{1}.{2}".format(product, location, service_url)
            )

            requesturl = "https://{0}/".format(endpoint)
        else:
            endpoint = urlparse(requesturl).netloc
            if endpoint == "":
                endpoint_err = (
                    "Could not find a valid endpoint in the "
                    "requesturl: {0}. Looking for something "
                    "like https://some.aws.endpoint/?args"
                ).format(requesturl)
                log.error(endpoint_err)
                if return_url is True:
                    return {"error": endpoint_err}, requesturl
                return {"error": endpoint_err}

    log.debug("Using AWS endpoint: %s", endpoint)
    method = "GET"

    aws_api_version = prov_dict.get(
        "aws_api_version",
        prov_dict.get("{0}_api_version".format(product), DEFAULT_AWS_API_VERSION),
    )

    # Fall back to the ec2 driver's id & key if this component defines neither
    if not prov_dict.get("id", None):
        prov_dict["id"] = providers.get(provider, {}).get("ec2", {}).get("id", {})
        prov_dict["key"] = providers.get(provider, {}).get("ec2", {}).get("key", {})

    if sigver == "4":
        headers, requesturl = sig4(
            method,
            endpoint,
            params,
            prov_dict,
            aws_api_version,
            location,
            product,
            requesturl=requesturl,
        )
        params_with_headers = {}
    else:
        params_with_headers = sig2(method, endpoint, params, prov_dict, aws_api_version)
        headers = {}

    attempts = 0
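    # Retry transient failures. AWS_RETRY_CODES is assumed to hold the
    # throttling/availability error codes that are safe to retry, and
    # sleep_exponential_backoff() is assumed to pause for an exponentially
    # growing interval between attempts.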
    while attempts < AWS_MAX_RETRIES:
        log.debug("AWS Request: %s", requesturl)
        log.trace("AWS Request Parameters: %s", params_with_headers)
        try:
            result = requests.get(
                requesturl, headers=headers, params=params_with_headers
            )
            log.debug("AWS Response Status Code: %s", result.status_code)
            log.trace("AWS Response Text: %s", result.text)
            result.raise_for_status()
            break
        except requests.exceptions.HTTPError as exc:
            root = ET.fromstring(exc.response.content)
            data = xml.to_dict(root)

            # check to see if we should retry the query
            err_code = data.get("Errors", {}).get("Error", {}).get("Code", "")
            if attempts < AWS_MAX_RETRIES and err_code and err_code in AWS_RETRY_CODES:
                attempts += 1
                log.error(
                    "AWS Response Status Code and Error: [%s %s] %s; "
                    "Attempts remaining: %s",
                    exc.response.status_code,
                    exc,
                    data,
                    attempts,
                )
                sleep_exponential_backoff(attempts)
                continue

            log.error(
                "AWS Response Status Code and Error: [%s %s] %s",
                exc.response.status_code,
                exc,
                data,
            )
            if return_url is True:
                return {"error": data}, requesturl
            return {"error": data}
    else:
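        # The while-else branch runs only when every retry was consumed
        # without a successful response; report the last error captured above.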
        log.error(
            "AWS Response Status Code and Error: [%s %s] %s",
            exc.response.status_code,
            exc,
            data,
        )
        if return_url is True:
            return {"error": data}, requesturl
        return {"error": data}

    root = ET.fromstring(result.text)
    items = root[1]
    if return_root is True:
        items = root

    if setname:
        if sys.version_info < (2, 7):
            children_len = len(root.getchildren())
        else:
            children_len = len(root)

        for item in range(0, children_len):
            comps = root[item].tag.split("}")
            if comps[1] == setname:
                items = root[item]

    ret = []
    for item in items:
        ret.append(xml.to_dict(item))

    if return_url is True:
        return ret, requesturl

    return ret
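
The sig2 helper invoked above is not shown in this example. As a rough sketch
of what Signature Version 2 signing involves, following the AWS documentation
linked in the docstring: the query parameters (plus the signing metadata) are
sorted, percent-encoded, joined into a canonical query string, and signed with
HMAC-SHA256. The helper name, parameter names, and credentials below are
illustrative assumptions, not the module's actual implementation.

import base64
import datetime
import hashlib
import hmac
import urllib.parse


def sigver2_params(method, endpoint, params, keyid, key, api_version):
    """Return query parameters carrying a Signature Version 2 signature."""
    signed = dict(params)
    signed.update({
        'AWSAccessKeyId': keyid,
        'SignatureVersion': '2',
        'SignatureMethod': 'HmacSHA256',
        'Timestamp': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
        'Version': api_version,
    })
    # Canonical query string: parameters sorted by name, RFC 3986 encoded
    querystring = '&'.join(
        '{0}={1}'.format(urllib.parse.quote(k, safe='-_.~'),
                         urllib.parse.quote(str(signed[k]), safe='-_.~'))
        for k in sorted(signed))
    string_to_sign = '{0}\n{1}\n/\n{2}'.format(method, endpoint.lower(),
                                               querystring)
    digest = hmac.new(key.encode('utf-8'),
                      string_to_sign.encode('utf-8'),
                      hashlib.sha256).digest()
    signed['Signature'] = base64.b64encode(digest).decode('ascii')
    return signed

The resulting dictionary is what a caller would send as the GET query
parameters, which matches how params_with_headers is used in the request loop
above.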