Example #1
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        url,
        username=None,
        password=None,
        with_grains=False):
    url = url.replace('%s', _quote(minion_id))

    grain_pattern = r'<(?P<grain_name>.*?)>'

    if with_grains:
        # Get the value of the grain and substitute each grain
        # name for the url-encoded version of its grain value.
        for match in re.finditer(grain_pattern, url):
            grain_name = match.group('grain_name')
            grain_value = __salt__['grains.get'](grain_name, None)

            if not grain_value:
                logger.error("Unable to get minion '%s' grain: %s", minion_id,
                             grain_name)
                return {}

            grain_value = _quote(six.text_type(grain_value))
            url = re.sub('<{0}>'.format(grain_name), grain_value, url)
    try:
        r = requests.get(url, auth=(username, password))
        r.raise_for_status()
    except RequestException as e:
        logger.warning(e)
        return {}
    else:
        return {"outpost": r.json()}
Example #2
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               url,
               with_grains=False,
               username=None,
               password=None):
    """
    Read pillar data from HTTP response.

    :param str url: Url to request.
    :param bool with_grains: Whether to substitute strings in the url with their grain values.
    :param str username: Username for http basic auth
    :param str password: Password for http basic auth

    :return: A dictionary of the pillar data to add.
    :rtype: dict
    """

    url = url.replace("%s", _quote(minion_id))

    grain_pattern = r"<(?P<grain_name>.*?)>"

    if with_grains:
        # Get the value of the grain and substitute each grain
        # name for the url-encoded version of its grain value.
        for match in re.finditer(grain_pattern, url):
            grain_name = match.group("grain_name")
            grain_value = __salt__["grains.get"](grain_name, None)

            if not grain_value:
                log.error("Unable to get minion '%s' grain: %s", minion_id, grain_name)
                return {}

            grain_value = _quote(six.text_type(grain_value))
            url = re.sub("<{0}>".format(grain_name), grain_value, url)

    log.debug('Getting url: %s', url)

    data = __salt__['http.query'](url=url, username=username, password=password, decode=True, decode_type='yaml')

    if "dict" in data:
        return data["dict"]

    log.error("Error on minion '%s' http query: %s\nMore Info:\n", minion_id, url)

    for key in data:
        log.error("%s: %s", key, data[key])

    return {}
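
The grain substitution used in Examples #1 and #2 can be exercised on its own. A minimal sketch, assuming _quote is an alias for urllib's quote and using a made-up url and grains dict in place of __salt__['grains.get']:

import re
from urllib.parse import quote

# Stand-ins for the Salt pieces used above.
grain_pattern = r'<(?P<grain_name>.*?)>'
grains = {'os': 'Ubuntu', 'osrelease': '20.04'}

url = 'https://pillar.example.com/%s/<os>/<osrelease>'
url = url.replace('%s', quote('web-01.example.com'))

# Replace each <grain_name> placeholder with the url-encoded grain value.
for match in re.finditer(grain_pattern, url):
    grain_name = match.group('grain_name')
    grain_value = quote(str(grains.get(grain_name, '')))
    url = re.sub('<{0}>'.format(grain_name), grain_value, url)

print(url)  # https://pillar.example.com/web-01.example.com/Ubuntu/20.04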
Example #3
File: s3.py Project: bryson/salt
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
                      cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket, saltenv, path)
        file_md5 = "".join(list(filter(str.isalnum, file_meta['ETag']))) \
            if file_meta else None

        cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

        log.debug("Cached file: path={0}, md5={1}, etag={2}".format(cached_file_path, cached_md5, file_md5))

        # hashes match we have a cache hit
        log.debug("Cached file: path={0}, md5={1}, etag={2}".format(cached_file_path, cached_md5, file_md5))
        if cached_md5 == file_md5:
            return

    # ... or get the file from S3
    __utils__['s3.query'](
        key=creds.key,
        keyid=creds.keyid,
        kms_keyid=creds.kms_keyid,
        bucket=bucket,
        service_url=creds.service_url,
        path=_quote(path),
        local_file=cached_file_path,
        verify_ssl=creds.verify_ssl,
        location=creds.location
    )
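
The cache-hit test above reduces to comparing a local MD5 digest against the S3 ETag with non-alphanumeric characters stripped. A standalone sketch of that check (the ETag value and path in the commented call are illustrative):

import hashlib

def etag_matches_local_file(etag, local_path):
    # Strip quotes and anything else non-alphanumeric from the ETag, mirroring
    # the "".join(filter(str.isalnum, ...)) call above. Only meaningful for
    # non-multipart uploads, where the ETag is the object's MD5 digest.
    etag_md5 = ''.join(ch for ch in etag if ch.isalnum())

    md5 = hashlib.md5()
    with open(local_path, 'rb') as handle:
        for chunk in iter(lambda: handle.read(65536), b''):
            md5.update(chunk)
    return md5.hexdigest() == etag_md5

# Illustrative call:
# etag_matches_local_file('"5d41402abc4b2a76b9719d911017c592"', '/var/cache/salt/minion/example')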
Example #4
 def _get_file():
     """
     Helper function that gets the file from S3 and checks if it can be skipped.
     Returns True if the file was downloaded and False if the download was skipped.
     """
     ret = __utils__['s3.query'](key=s3_key_kwargs['key'],
                                 keyid=s3_key_kwargs['keyid'],
                                 kms_keyid=s3_key_kwargs['keyid'],
                                 method='HEAD',
                                 bucket=bucket_name,
                                 service_url=s3_key_kwargs['service_url'],
                                 verify_ssl=s3_key_kwargs['verify_ssl'],
                                 location=s3_key_kwargs['location'],
                                 path=_quote(path),
                                 local_file=cached_file_path,
                                 full_headers=True,
                                 path_style=s3_key_kwargs['path_style'],
                                 https_enable=s3_key_kwargs['https_enable'])
     if ret:
         for header_name, header_value in ret['headers'].items():
             header_name = header_name.strip()
             header_value = header_value.strip()
             if six.text_type(header_name).lower() == 'last-modified':
                 s3_file_mtime = datetime.datetime.strptime(
                     header_value, '%a, %d %b %Y %H:%M:%S %Z')
             elif six.text_type(header_name).lower() == 'content-length':
                 s3_file_size = int(header_value)
         if cached_file_data['size'] == s3_file_size and \
                 cached_file_data['mtime'] > s3_file_mtime:
             log.info(
                 '%s - %s : %s skipped download since cached file size '
                 'equal to and mtime after s3 values', bucket_name, saltenv,
                 path)
             return False
     return True
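
The skip logic in this helper hinges on parsing the Last-Modified and Content-Length headers from the HEAD response. A self-contained sketch of that comparison, with made-up header values and cache metadata:

import datetime

headers = {
    'Last-Modified': 'Wed, 12 Oct 2022 07:28:00 GMT',
    'Content-Length': '2048',
}
cached_file_data = {
    'size': 2048,
    'mtime': datetime.datetime(2022, 10, 13, 9, 0, 0),
}

s3_file_mtime = datetime.datetime.strptime(
    headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S %Z')
s3_file_size = int(headers['Content-Length'])

# Same test as above: skip the download when the cached copy is the same size
# and newer than the object on S3.
skip_download = (cached_file_data['size'] == s3_file_size
                 and cached_file_data['mtime'] > s3_file_mtime)
print(skip_download)  # True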
Example #5
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
                      cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket, saltenv, path)
        file_md5 = "".join(list(filter(str.isalnum, file_meta['ETag']))) \
            if file_meta else None

        cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

        log.debug("Cached file: path={0}, md5={1}, etag={2}".format(
            cached_file_path, cached_md5, file_md5))

        # hashes match we have a cache hit
        log.debug("Cached file: path={0}, md5={1}, etag={2}".format(
            cached_file_path, cached_md5, file_md5))
        if cached_md5 == file_md5:
            return

    # ... or get the file from S3
    __utils__['s3.query'](key=creds.key,
                          keyid=creds.keyid,
                          kms_keyid=creds.kms_keyid,
                          bucket=bucket,
                          service_url=creds.service_url,
                          path=_quote(path),
                          local_file=cached_file_path,
                          verify_ssl=creds.verify_ssl,
                          location=creds.location)
Example #6
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
                      cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket, saltenv, path)
        file_md5 = ''.join(filter(str.isalnum, file_meta['ETag'])) \
            if file_meta else None

        cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

        # hashes match we have a cache hit
        if cached_md5 == file_md5:
            return

    # ... or get the file from S3
    s3.query(
        key=creds.key,
        keyid=creds.keyid,
        bucket=bucket,
        service_url=creds.service_url,
        path=_quote(path),
        local_file=cached_file_path,
        verify_ssl=creds.verify_ssl
    )
Example #7
def ext_pillar(minion_id,
               pillar,  # pylint: disable=W0613
               project='default',
               host='127.0.0.1',
               port=8181,
               username=None,
               password=None,
               grains=[]):
    """
    Read pillar data from Architect service response.

    :return: A dictionary of the pillar data to add.
    :rtype: dict
    """
    url = "{}://{}:{}/salt/{}/enc/{}/{}/pillar".format('http',
                                                       host,
                                                       port,
                                                       'v1',
                                                       project,
                                                       _quote(minion_id))

    log.info('Getting Pillar data from "{}"'.format(url))
    data = __salt__['http.query'](url=url, decode=True, decode_type='yaml')

    if 'dict' in data:
        return data['dict']

    log.error("Error on minion {}, request sent to {}.".format(minion_id, url))

    for key in data:
        log.error('%s: %s', key, data[key])

    return {}
Example #8
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        url,
        with_grains=False,
        options=None):
    '''
    Read pillar data from HTTP response.
    :param str url: Url to request.
    :param dict options: Custom options passed to __salt__['http.query'].
    :return: A dictionary of the pillar data to add.
    :rtype: dict
    '''

    url = url.replace('%s', _quote(minion_id))
    options = {} if options is None else options

    log.debug('Getting pillar data from myss: %s', url)
    data = __salt__['http.query'](url=url,
                                  decode=True,
                                  decode_type='json',
                                  **options)
    if 'dict' in data:
        return data['dict']

    log.error("Error on minion '%s' myss query: %s\nMore Info:\n", minion_id,
              url)
    for key in data:
        log.error('%s: %s', key, data[key])

    return {}
Example #9
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
                      cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket, saltenv, path)
        file_md5 = ''.join(filter(str.isalnum, file_meta['ETag'])) \
            if file_meta else None

        cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

        # hashes match we have a cache hit
        if cached_md5 == file_md5:
            return

    # ... or get the file from S3
    s3.query(key=creds.key,
             keyid=creds.keyid,
             bucket=bucket,
             service_url=creds.service_url,
             path=_quote(path),
             local_file=cached_file_path,
             verify_ssl=creds.verify_ssl)
Example #10
def top(**kwargs):
    """
    Read Top file data from Architect service response.

    :return: A dictionary of top data for the ``base`` environment.
    :rtype: dict
    """
    minion_id = kwargs['opts']['id']
    if 'http_architect' not in kwargs['opts']:
        raise Exception('http_architect configuration is missing.')
    host = kwargs['opts']['http_architect']['host']
    port = kwargs['opts']['http_architect']['port']
    project = kwargs['opts']['http_architect']['project']
    url = "{}://{}:{}/salt/{}/enc/{}/{}/top".format('http',
                                                    host,
                                                    port,
                                                    'v1',
                                                    project,
                                                    _quote(minion_id))

    log.info('Getting Tops data from "{}"'.format(url))
    data = salt.utils.http.query(url=url, decode=True, decode_type='yaml')

    if 'dict' in data:
        return {'base': data['dict']}

    for key in data:
        log.error('%s: %s', key, data[key])

    return {}
Example #11
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        url,
        with_grains=False):
    '''
    Read pillar data from HTTP response.

    :param str url: Url to request.
    :param bool with_grains: Whether to substitute strings in the url with their grain values.

    :return: A dictionary of the pillar data to add.
    :rtype: dict
    '''

    url = url.replace('%s', _quote(minion_id))

    grain_pattern = r'<(?P<grain_name>.*?)>'

    if with_grains:
        # Get the value of the grain and substitute each grain
        # name for the url-encoded version of its grain value.
        for match in re.finditer(grain_pattern, url):
            grain_name = match.group('grain_name')
            grain_value = __salt__['grains.get'](grain_name, None)

            if not grain_value:
                log.error("Unable to get minion '%s' grain: %s", minion_id,
                          grain_name)
                return {}

            grain_value = _quote(six.text_type(grain_value))
            url = re.sub('<{0}>'.format(grain_name), grain_value, url)

    log.debug('Getting url: %s', url)
    data = __salt__['http.query'](url=url, decode=True, decode_type='json')

    if 'dict' in data:
        return data['dict']

    log.error("Error on minion '%s' http query: %s\nMore Info:\n", minion_id,
              url)

    for key in data:
        log.error('%s: %s', key, data[key])

    return {}
Example #12
    def percent_encode(line):
        if not isinstance(line, str):
            return line

        s = line
        if sys.stdin.encoding is None:
            s = line.decode().encode('utf8')
        else:
            s = line.decode(sys.stdin.encoding).encode('utf8')
        res = _quote(s, '')
        res = res.replace('+', '%20')
        res = res.replace('*', '%2A')
        res = res.replace('%7E', '~')
        return res
Example #13
    def percent_encode(line):
        if not isinstance(line, six.string_types):
            return line

        s = line
        if sys.stdin.encoding is None:
            s = line.decode().encode('utf8')
        else:
            s = line.decode(sys.stdin.encoding).encode('utf8')
        res = _quote(s, '')
        res = res.replace('+', '%20')
        res = res.replace('*', '%2A')
        res = res.replace('%7E', '~')
        return res
Example #14
    def percent_encode(line):
        if not isinstance(line, six.string_types):
            return line

        s = line
        if sys.stdin.encoding is None:
            s = line.decode().encode("utf8")
        else:
            s = line.decode(sys.stdin.encoding).encode("utf8")
        res = _quote(s, "")
        res = res.replace("+", "%20")
        res = res.replace("*", "%2A")
        res = res.replace("%7E", "~")
        return res
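
The three percent_encode variants above assume Python 2 string semantics (str.decode, sys.stdin.encoding). A rough Python 3 equivalent, assuming the intent is RFC 3986 style encoding of a text value:

from urllib.parse import quote

def percent_encode(value):
    if not isinstance(value, str):
        return value
    res = quote(value.encode('utf-8'), safe='')
    # With safe='' quote() already emits %20 for spaces and %2A for '*', and
    # never emits '+'; the '%7E' replacement only matters on Python versions
    # that still escape '~'. The replacements are kept for parity with the
    # Python 2 snippets above.
    res = res.replace('+', '%20')
    res = res.replace('*', '%2A')
    res = res.replace('%7E', '~')
    return res

print(percent_encode('a b*c~'))  # a%20b%2Ac~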
Example #15
def _compute_signature(parameters, access_key_secret, method, path):
    """
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html
    """
    parameters["signature_method"] = "HmacSHA256"

    string_to_sign = "{0}\n{1}\n".format(method.upper(), path)

    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode("utf-8")
        pairs.append(_quote(key, safe="") + "=" + _quote(val, safe="-_~"))
    qs = "&".join(pairs)
    string_to_sign += qs

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    signature = base64.b64encode(h.digest()).strip()

    return signature
Example #16
def _compute_signature(parameters, access_key_secret, method, path):
    """
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html
    """
    parameters["signature_method"] = "HmacSHA256"

    string_to_sign = "{0}\n{1}\n".format(method.upper(), path)

    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = str(parameters[key]).encode("utf-8")
        pairs.append(_quote(key, safe="") + "=" + _quote(val, safe="-_~"))
    qs = "&".join(pairs)
    string_to_sign += qs

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    signature = base64.b64encode(h.digest()).strip()

    return signature
Example #17
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = str(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    signature = base64.b64encode(h.digest()).strip()

    return signature
Example #18
def _compute_signature(parameters, access_key_secret, method, path):
    '''
    Generate an API request signature. Detailed document can be found at:

    https://docs.qingcloud.com/api/common/signature.html
    '''
    parameters['signature_method'] = 'HmacSHA256'

    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)

    keys = sorted(parameters.keys())
    pairs = []
    for key in keys:
        val = six.text_type(parameters[key]).encode('utf-8')
        pairs.append(_quote(key, safe='') + '=' + _quote(val, safe='-_~'))
    qs = '&'.join(pairs)
    string_to_sign += qs

    h = hmac.new(access_key_secret, digestmod=sha256)
    h.update(string_to_sign)

    signature = base64.b64encode(h.digest()).strip()

    return signature
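
The _compute_signature variants above likewise assume Python 2, where hmac.new accepts str. A Python 3 sketch of the same HmacSHA256 scheme with explicit byte handling; the key, parameters, and path below are illustrative:

import base64
import hashlib
import hmac
from urllib.parse import quote

def compute_signature(parameters, access_key_secret, method, path):
    parameters['signature_method'] = 'HmacSHA256'

    # Canonical request: METHOD, path, then the sorted, url-encoded query string.
    string_to_sign = '{0}\n{1}\n'.format(method.upper(), path)
    pairs = []
    for key in sorted(parameters):
        val = str(parameters[key])
        pairs.append(quote(key, safe='') + '=' + quote(val, safe='-_~'))
    string_to_sign += '&'.join(pairs)

    digest = hmac.new(access_key_secret.encode('utf-8'),
                      string_to_sign.encode('utf-8'),
                      digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).strip()

print(compute_signature({'action': 'DescribeInstances', 'zone': 'pek3'},
                        'secret', 'GET', '/iaas/'))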
Example #19
def _get_file_from_s3(creds, metadata, saltenv, bucket, path,
                      cached_file_path):
    """
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    """

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket, saltenv, path)
        file_md5 = ("".join(list(filter(str.isalnum, file_meta["ETag"])))
                    if file_meta else None)

        cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, "md5")

        # hashes match we have a cache hit
        log.debug(
            "Cached file: path=%s, md5=%s, etag=%s",
            cached_file_path,
            cached_md5,
            file_md5,
        )
        if cached_md5 == file_md5:
            return

    # ... or get the file from S3
    __utils__["s3.query"](
        key=creds.key,
        keyid=creds.keyid,
        kms_keyid=creds.kms_keyid,
        bucket=bucket,
        service_url=creds.service_url,
        path=_quote(path),
        local_file=cached_file_path,
        verify_ssl=creds.verify_ssl,
        location=creds.location,
        path_style=creds.path_style,
        https_enable=creds.https_enable,
    )
Example #20
def ext_pillar(
        minion_id,
        pillar,  # pylint: disable=W0613
        url,
        username=None,
        password=None):
    '''
    Read pillar data from HTTP response.

    :param str url: Url to request.
    :param str username: username for basic auth
    :param str password: password for basic auth
    :return: A dictionary of the pillar data to add.
    :rtype: dict
    '''

    url = url.replace('%s', _quote(minion_id))

    _LOG.debug('Getting url: %s', url)

    if username and password:
        data = __salt__['http.query'](url=url,
                                      username=username,
                                      password=password,
                                      decode=True,
                                      decode_type='json')
    else:
        data = __salt__['http.query'](url=url, decode=True, decode_type='json')

    if 'dict' in data:
        return data['dict']

    _LOG.error("Error on minion '%s' http query: %s\nMore Info:\n", minion_id,
               url)

    for key in data:
        _LOG.error('%s: %s', key, data[key])

    return {}
Example #21
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''
    key, keyid, service_url, verify_ssl = _get_s3_key()

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            file_etag = file_meta['ETag']

            if file_etag.find('-') == -1:
                file_md5 = file_etag
                cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_md5:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_size = cached_file_stat.st_size
                cached_file_mtime = datetime.datetime.fromtimestamp(
                    cached_file_stat.st_mtime)

                cached_file_lastmod = datetime.datetime.strptime(
                    file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
                if (cached_file_size == int(file_meta['Size'])
                        and cached_file_mtime > cached_file_lastmod):
                    log.debug('cached file size equal to metadata size and '
                              'cached file mtime later than metadata last '
                              'modification time.')
                    ret = s3.query(key=key,
                                   keyid=keyid,
                                   method='HEAD',
                                   bucket=bucket_name,
                                   service_url=service_url,
                                   verify_ssl=verify_ssl,
                                   path=_quote(path),
                                   local_file=cached_file_path)
                    if ret is not None:
                        for header in ret['headers']:
                            name, value = header.split(':', 1)
                            name = name.strip()
                            value = value.strip()
                            if name == 'Last-Modified':
                                s3_file_mtime = datetime.datetime.strptime(
                                    value, '%a, %d %b %Y %H:%M:%S %Z')
                            elif name == 'Content-Length':
                                s3_file_size = int(value)
                        if (cached_file_size == s3_file_size
                                and cached_file_mtime > s3_file_mtime):
                            log.info(
                                '{0} - {1} : {2} skipped download since cached file size '
                                'equal to and mtime after s3 values'.format(
                                    bucket_name, saltenv, path))
                            return

    # ... or get the file from S3
    s3.query(key=key,
             keyid=keyid,
             bucket=bucket_name,
             service_url=service_url,
             verify_ssl=verify_ssl,
             path=_quote(path),
             local_file=cached_file_path)
Example #22
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''
    key, keyid, service_url, verify_ssl = _get_s3_key()

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            file_etag = file_meta['ETag']

            if file_etag.find('-') == -1:
                file_md5 = file_etag
                cached_md5 = salt.utils.get_hash(cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_md5:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_size = cached_file_stat.st_size
                cached_file_mtime = datetime.datetime.fromtimestamp(
                    cached_file_stat.st_mtime)

                cached_file_lastmod = datetime.datetime.strptime(
                    file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
                if (cached_file_size == int(file_meta['Size']) and
                        cached_file_mtime > cached_file_lastmod):
                    log.debug('cached file size equal to metadata size and '
                              'cached file mtime later than metadata last '
                              'modification time.')
                    ret = s3.query(
                        key=key,
                        keyid=keyid,
                        method='HEAD',
                        bucket=bucket_name,
                        service_url=service_url,
                        verify_ssl=verify_ssl,
                        path=_quote(path),
                        local_file=cached_file_path
                    )
                    if ret is not None:
                        for header in ret['headers']:
                            name, value = header.split(':', 1)
                            name = name.strip()
                            value = value.strip()
                            if name == 'Last-Modified':
                                s3_file_mtime = datetime.datetime.strptime(
                                    value, '%a, %d %b %Y %H:%M:%S %Z')
                            elif name == 'Content-Length':
                                s3_file_size = int(value)
                        if (cached_file_size == s3_file_size and
                                cached_file_mtime > s3_file_mtime):
                            log.info(
                                '{0} - {1} : {2} skipped download since cached file size '
                                'equal to and mtime after s3 values'.format(
                                    bucket_name, saltenv, path))
                            return

    # ... or get the file from S3
    s3.query(
        key=key,
        keyid=keyid,
        bucket=bucket_name,
        service_url=service_url,
        verify_ssl=verify_ssl,
        path=_quote(path),
        local_file=cached_file_path
    )
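
Note that the HEAD response headers arrive in two shapes across these examples: Examples #21 and #22 receive a list of 'Name: value' strings and split on ':', while the full_headers=True variants receive a dict. A small helper sketch, not part of the original code, that extracts the two values either way:

import datetime

def extract_s3_head_metadata(headers):
    """Return (mtime, size) from HEAD headers given as a dict or a list of strings."""
    if isinstance(headers, dict):
        items = headers.items()
    else:
        items = (header.split(':', 1) for header in headers if ':' in header)

    s3_file_mtime = None
    s3_file_size = None
    for name, value in items:
        name = name.strip().lower()
        value = value.strip()
        if name == 'last-modified':
            s3_file_mtime = datetime.datetime.strptime(
                value, '%a, %d %b %Y %H:%M:%S %Z')
        elif name == 'content-length':
            s3_file_size = int(value)
    return s3_file_mtime, s3_file_size

print(extract_s3_head_metadata(['Content-Length: 2048',
                                'Last-Modified: Wed, 12 Oct 2022 07:28:00 GMT']))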
Example #23
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, full_headers=False, kms_keyid=None,
          location=None, role_arn=None, chunk_size=16384, path_style=False,
          https_enable=True):
    """
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use salt with a proxy for an s3 compatible storage

    You can use either https protocol or http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    """
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = '{0}.{1}'.format(bucket, service_url)

    if path_style and bucket:
        path = '{0}/{1}'.format(bucket, path)

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE

    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ('PUT', 'POST'):
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid

    if not location:
        location = salt.utils.aws.get_location()

    data = ''
    fh = None
    payload_hash = None
    if method == 'PUT':
        if local_file:
            payload_hash = salt.utils.hashutils.get_hash(local_file, form='sha256')

    if path is None:
        path = ''
    path = _quote(path)

    if not requesturl:
        requesturl = (('https' if https_enable else 'http')+'://{0}/{1}').format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={'id': keyid, 'key': key},
            role_arn=role_arn,
            location=location,
            product='s3',
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug('S3 Request: %s', requesturl)
    log.debug('S3 Headers::')
    log.debug('    Authorization: %s', headers['Authorization'])

    if not data:
        data = None

    try:
        if method == 'PUT':
            if local_file:
                fh = salt.utils.files.fopen(local_file, 'rb')  # pylint: disable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        elif method == 'GET' and local_file and not return_bin:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        else:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      timeout=300)
    finally:
        if fh is not None:
            fh.close()

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or 'Unknown error'
        log.debug('    Response content: %s', err_text)

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data['Code']
            err_msg = err_data['Message']
        except (KeyError, ET.ParseError) as err:
            log.debug(
                'Failed to parse s3 err response. %s: %s',
                type(err).__name__, err
            )
            err_code = 'http-{0}'.format(result.status_code)
            err_msg = err_text

    if os.environ.get('MOCK_SLOW_DOWN'):
        result.status_code = 503
        err_code = 'SlowDown'
        err_msg = 'MOCK_SLOW_DOWN environment variable set. All S3 queries will fail for testing purposes.'

    log.debug('S3 Response Status Code: %s', result.status_code)

    if method == 'PUT':
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    'Failed to upload from {0} to {1}. {2}: {3}'.format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to create bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if local_file:
            log.debug('Uploaded from %s to %s', local_file, path)
        else:
            log.debug('Created bucket %s', bucket)
        return None

    if method == 'DELETE':
        if not six.text_type(result.status_code).startswith('2'):
            if path:
                raise CommandExecutionError(
                    'Failed to delete {0} from bucket {1}. {2}: {3}'.format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to delete bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if path:
            log.debug('Deleted %s from bucket %s', path, bucket)
        else:
            log.debug('Deleted bucket %s', bucket)
        return None

    sortof_ok = ['SlowDown', 'ServiceUnavailable', 'RequestTimeTooSkewed',
        'RequestTimeout', 'OperationAborted', 'InternalError',
        'AccessDenied']

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        if result.status_code < 200 or result.status_code >= 300:
            if err_code in sortof_ok:
                log.error('Failed to get file=%s. %s: %s', path, err_code, err_msg)
                return None
            raise CommandExecutionError(
                'Failed to get file={0}. {1}: {2}'.format(path, err_code, err_msg))

        log.debug('Saving to local file: %s', local_file)
        with salt.utils.files.fopen(local_file, 'wb') as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return 'Saved to local file: {0}'.format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        if err_code in sortof_ok:
            log.error('Failed s3 operation. %s: %s', err_code, err_msg)
            return None
        raise CommandExecutionError(
            'Failed s3 operation. {0}: {1}'.format(err_code, err_msg))

    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content

    if result.content:
        items = ET.fromstring(result.content)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return None
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
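
As a usage sketch, a caller with static credentials might fetch an object to a local file as below. This is not taken from the examples; the import path, bucket, and object path are assumptions, and the key/keyid placeholders reuse the docstring's sample values:

# Hypothetical direct call; within Salt this is normally reached via
# __utils__['s3.query']. All values below are placeholders.
import salt.utils.s3  # assumes the module shown above lives at salt/utils/s3.py

salt.utils.s3.query(
    key='askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs',  # sample secret key from the docstring
    keyid='GKTADJGHEIQSXMKKRBJ08H',                   # sample key id from the docstring
    bucket='my-bucket',
    path='files/example.txt',
    local_file='/tmp/example.txt',
    location='us-east-1',
)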
Example #24
def query(
    key,
    keyid,
    method="GET",
    params=None,
    headers=None,
    requesturl=None,
    return_url=False,
    bucket=None,
    service_url=None,
    path="",
    return_bin=False,
    action=None,
    local_file=None,
    verify_ssl=True,
    full_headers=False,
    kms_keyid=None,
    location=None,
    role_arn=None,
    chunk_size=16384,
    path_style=False,
    https_enable=True,
):
    """
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from EC2 IAM
    metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use salt with a proxy for an s3 compatible storage

    You can use either https protocol or http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from EC2 IAM
    metadata service will be made. Failing that, default is us-east-1
    """
    if not HAS_REQUESTS:
        log.error("There was an error: requests is required for s3 access")

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = "s3.amazonaws.com"

    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = "{0}.{1}".format(bucket, service_url)

    if path_style and bucket:
        path = "{0}/{1}".format(bucket, path)

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = salt.utils.aws.IROLE_CODE

    if not keyid:
        keyid = salt.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ("PUT", "POST"):
        headers["x-amz-server-side-encryption"] = "aws:kms"
        headers["x-amz-server-side-encryption-aws-kms-key-id"] = kms_keyid

    if not location:
        location = salt.utils.aws.get_location()

    data = ""
    fh = None
    payload_hash = None
    if method == "PUT":
        if local_file:
            payload_hash = salt.utils.hashutils.get_hash(local_file,
                                                         form="sha256")

    if path is None:
        path = ""
    path = _quote(path)

    if not requesturl:
        requesturl = (("https" if https_enable else "http") +
                      "://{0}/{1}").format(endpoint, path)
        headers, requesturl = salt.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri="/{0}".format(path),
            prov_dict={
                "id": keyid,
                "key": key
            },
            role_arn=role_arn,
            location=location,
            product="s3",
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug("S3 Request: %s", requesturl)
    log.debug("S3 Headers::")
    log.debug("    Authorization: %s", headers["Authorization"])

    if not data:
        data = None

    try:
        if method == "PUT":
            if local_file:
                # pylint: disable=resource-leakage
                fh = salt.utils.files.fopen(local_file, "rb")
                # pylint: enable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                stream=True,
                timeout=300,
            )
        elif method == "GET" and local_file and not return_bin:
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                stream=True,
                timeout=300,
            )
        else:
            result = requests.request(
                method,
                requesturl,
                headers=headers,
                data=data,
                verify=verify_ssl,
                timeout=300,
            )
    finally:
        if fh is not None:
            fh.close()

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or "Unknown error"
        log.debug("    Response content: %s", err_text)

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data["Code"]
            err_msg = err_data["Message"]
        except (KeyError, ET.ParseError) as err:
            log.debug("Failed to parse s3 err response. %s: %s",
                      type(err).__name__, err)
            err_code = "http-{0}".format(result.status_code)
            err_msg = err_text

    log.debug("S3 Response Status Code: %s", result.status_code)

    if method == "PUT":
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    "Failed to upload from {0} to {1}. {2}: {3}".format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                "Failed to create bucket {0}. {1}: {2}".format(
                    bucket, err_code, err_msg))

        if local_file:
            log.debug("Uploaded from %s to %s", local_file, path)
        else:
            log.debug("Created bucket %s", bucket)
        return

    if method == "DELETE":
        if not six.text_type(result.status_code).startswith("2"):
            if path:
                raise CommandExecutionError(
                    "Failed to delete {0} from bucket {1}. {2}: {3}".format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                "Failed to delete bucket {0}. {1}: {2}".format(
                    bucket, err_code, err_msg))

        if path:
            log.debug("Deleted %s from bucket %s", path, bucket)
        else:
            log.debug("Deleted bucket %s", bucket)
        return

    # This can be used to save a binary object to disk
    if local_file and method == "GET":
        if result.status_code < 200 or result.status_code >= 300:
            raise CommandExecutionError("Failed to get file. {0}: {1}".format(
                err_code, err_msg))

        log.debug("Saving to local file: %s", local_file)
        with salt.utils.files.fopen(local_file, "wb") as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return "Saved to local file: {0}".format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        raise CommandExecutionError("Failed s3 operation. {0}: {1}".format(
            err_code, err_msg))

    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content

    if result.content:
        items = ET.fromstring(result.content)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return
        ret = {"headers": []}
        if full_headers:
            ret["headers"] = dict(result.headers)
        else:
            for header in result.headers:
                ret["headers"].append(header.strip())

    return ret
Example #25
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''
    key, keyid, service_url, verify_ssl, kms_keyid, location, path_style, https_enable = _get_s3_key()

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            file_etag = file_meta['ETag']

            if file_etag.find('-') == -1:
                file_md5 = file_etag
                cached_md5 = salt.utils.hashutils.get_hash(cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_md5:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_size = cached_file_stat.st_size
                cached_file_mtime = datetime.datetime.fromtimestamp(
                    cached_file_stat.st_mtime)

                cached_file_lastmod = datetime.datetime.strptime(
                    file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
                if (cached_file_size == int(file_meta['Size']) and
                        cached_file_mtime > cached_file_lastmod):
                    log.debug('cached file size equal to metadata size and '
                              'cached file mtime later than metadata last '
                              'modification time.')
                    ret = __utils__['s3.query'](
                        key=key,
                        keyid=keyid,
                        kms_keyid=keyid,
                        method='HEAD',
                        bucket=bucket_name,
                        service_url=service_url,
                        verify_ssl=verify_ssl,
                        location=location,
                        path=_quote(path),
                        local_file=cached_file_path,
                        full_headers=True,
                        path_style=path_style,
                        https_enable=https_enable
                    )
                    if ret is not None:
                        for header_name, header_value in ret['headers'].items():
                            name = header_name.strip()
                            value = header_value.strip()
                            if six.text_type(name).lower() == 'last-modified':
                                s3_file_mtime = datetime.datetime.strptime(
                                    value, '%a, %d %b %Y %H:%M:%S %Z')
                            elif six.text_type(name).lower() == 'content-length':
                                s3_file_size = int(value)
                        if (cached_file_size == s3_file_size and
                                cached_file_mtime > s3_file_mtime):
                            log.info(
                                '%s - %s : %s skipped download since cached file size '
                                'equal to and mtime after s3 values',
                                bucket_name, saltenv, path
                            )
                            return

    # ... or get the file from S3
    __utils__['s3.query'](
        key=key,
        keyid=keyid,
        kms_keyid=keyid,
        bucket=bucket_name,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        path=_quote(path),
        local_file=cached_file_path,
        path_style=path_style,
        https_enable=https_enable,
    )
Example #26
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    """
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    """
    (
        key,
        keyid,
        service_url,
        verify_ssl,
        kms_keyid,
        location,
        path_style,
        https_enable,
    ) = _get_s3_key()

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            file_etag = file_meta["ETag"]

            if file_etag.find("-") == -1:
                file_md5 = file_etag
                cached_md5 = salt.utils.hashutils.get_hash(
                    cached_file_path, "md5")

                # hashes match we have a cache hit
                if cached_md5 == file_md5:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_size = cached_file_stat.st_size
                cached_file_mtime = datetime.datetime.fromtimestamp(
                    cached_file_stat.st_mtime)

                cached_file_lastmod = datetime.datetime.strptime(
                    file_meta["LastModified"], "%Y-%m-%dT%H:%M:%S.%fZ")
                if (cached_file_size == int(file_meta["Size"])
                        and cached_file_mtime > cached_file_lastmod):
                    log.debug("cached file size equal to metadata size and "
                              "cached file mtime later than metadata last "
                              "modification time.")
                    ret = __utils__["s3.query"](
                        key=key,
                        keyid=keyid,
                        kms_keyid=keyid,
                        method="HEAD",
                        bucket=bucket_name,
                        service_url=service_url,
                        verify_ssl=verify_ssl,
                        location=location,
                        path=_quote(path),
                        local_file=cached_file_path,
                        full_headers=True,
                        path_style=path_style,
                        https_enable=https_enable,
                    )
                    if ret is not None:
                        for header_name, header_value in ret["headers"].items(
                        ):
                            name = header_name.strip()
                            value = header_value.strip()
                            if six.text_type(name).lower() == "last-modified":
                                s3_file_mtime = datetime.datetime.strptime(
                                    value, "%a, %d %b %Y %H:%M:%S %Z")
                            elif six.text_type(name).lower() == "content-length":
                                s3_file_size = int(value)
                        if (cached_file_size == s3_file_size
                                and cached_file_mtime > s3_file_mtime):
                            log.info(
                                "%s - %s : %s skipped download since cached file size "
                                "equal to and mtime after s3 values",
                                bucket_name,
                                saltenv,
                                path,
                            )
                            return

    # ... or get the file from S3
    __utils__["s3.query"](
        key=key,
        keyid=keyid,
        kms_keyid=keyid,
        bucket=bucket_name,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        path=_quote(path),
        local_file=cached_file_path,
        path_style=path_style,
        https_enable=https_enable,
    )
Example #27
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    """
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    """
    s3_key_kwargs = _get_s3_key()

    def _get_file():
        """
        Helper function that gets the file from S3 and checks if it can be skipped.
        Returns True if the file was downloaded and False if the download was skipped.
        """
        ret = __utils__['s3.query'](key=s3_key_kwargs['key'],
                                    keyid=s3_key_kwargs['keyid'],
                                    kms_keyid=s3_key_kwargs['keyid'],
                                    method='HEAD',
                                    bucket=bucket_name,
                                    service_url=s3_key_kwargs['service_url'],
                                    verify_ssl=s3_key_kwargs['verify_ssl'],
                                    location=s3_key_kwargs['location'],
                                    path=_quote(path),
                                    local_file=cached_file_path,
                                    full_headers=True,
                                    path_style=s3_key_kwargs['path_style'],
                                    https_enable=s3_key_kwargs['https_enable'])
        if ret:
            for header_name, header_value in ret['headers'].items():
                header_name = header_name.strip()
                header_value = header_value.strip()
                if six.text_type(header_name).lower() == 'last-modified':
                    s3_file_mtime = datetime.datetime.strptime(
                        header_value, '%a, %d %b %Y %H:%M:%S %Z')
                elif six.text_type(header_name).lower() == 'content-length':
                    s3_file_size = int(header_value)
            if cached_file_data['size'] == s3_file_size and \
                    cached_file_data['mtime'] > s3_file_mtime:
                log.info(
                    '%s - %s : %s skipped download since cached file size '
                    'equal to and mtime after s3 values', bucket_name, saltenv,
                    path)
                return False
        return True

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            if file_meta['ETag'].find('-') == -1:
                cached_md5 = salt.utils.hashutils.get_hash(
                    cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_meta['ETag']:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_data = {
                    'size': cached_file_stat.st_size,
                    'mtime': datetime.datetime.fromtimestamp(
                        cached_file_stat.st_mtime),
                    'lastmod': datetime.datetime.strptime(
                        file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ'),
                }

                if (cached_file_data['size'] == int(file_meta['Size'])
                        and cached_file_data['mtime'] >
                        cached_file_data['lastmod']):
                    log.debug('cached file size equal to metadata size and '
                              'cached file mtime later than metadata last '
                              'modification time.')
                    if not _get_file():
                        # skipped download
                        return

    # ... or get the file from S3
    __utils__['s3.query'](
        key=s3_key_kwargs['key'],
        keyid=s3_key_kwargs['keyid'],
        kms_keyid=s3_key_kwargs['keyid'],
        bucket=bucket_name,
        service_url=s3_key_kwargs['service_url'],
        verify_ssl=s3_key_kwargs['verify_ssl'],
        location=s3_key_kwargs['location'],
        path=_quote(path),
        local_file=cached_file_path,
        path_style=s3_key_kwargs['path_style'],
        https_enable=s3_key_kwargs['https_enable'],
    )
Example #28
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    '''
    Checks the local cache for the file, if it's old or missing go grab the
    file from S3 and update the cache
    '''
    (key, keyid, service_url, verify_ssl, kms_keyid, location, path_style,
     https_enable) = _get_s3_key()

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
            file_etag = file_meta['ETag']

            if file_etag.find('-') == -1:
                file_md5 = file_etag
                cached_md5 = salt.utils.hashutils.get_hash(
                    cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_md5:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_size = cached_file_stat.st_size
                cached_file_mtime = datetime.datetime.fromtimestamp(
                    cached_file_stat.st_mtime)

                cached_file_lastmod = datetime.datetime.strptime(
                    file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
                if (cached_file_size == int(file_meta['Size'])
                        and cached_file_mtime > cached_file_lastmod):
                    log.debug('cached file size equal to metadata size and '
                              'cached file mtime later than metadata last '
                              'modification time.')
                    ret = __utils__['s3.query'](key=key,
                                                keyid=keyid,
                                                kms_keyid=keyid,
                                                method='HEAD',
                                                bucket=bucket_name,
                                                service_url=service_url,
                                                verify_ssl=verify_ssl,
                                                location=location,
                                                path=_quote(path),
                                                local_file=cached_file_path,
                                                full_headers=True,
                                                path_style=path_style,
                                                https_enable=https_enable)
                    if ret is not None:
                        for header_name, header_value in ret['headers'].items():
                            name = header_name.strip()
                            value = header_value.strip()
                            if six.text_type(name).lower() == 'last-modified':
                                s3_file_mtime = datetime.datetime.strptime(
                                    value, '%a, %d %b %Y %H:%M:%S %Z')
                            elif six.text_type(name).lower() == 'content-length':
                                s3_file_size = int(value)
                        if (cached_file_size == s3_file_size
                                and cached_file_mtime > s3_file_mtime):
                            log.info(
                                '%s - %s : %s skipped download since cached file size '
                                'equal to and mtime after s3 values',
                                bucket_name, saltenv, path)
                            return

    # ... or get the file from S3
    __utils__['s3.query'](
        key=key,
        keyid=keyid,
        kms_keyid=keyid,
        bucket=bucket_name,
        service_url=service_url,
        verify_ssl=verify_ssl,
        location=location,
        path=_quote(path),
        local_file=cached_file_path,
        path_style=path_style,
        https_enable=https_enable,
    )