Example #1
def _rootpath2url(rootpath, path):
  rootpath = os.path.abspath(rootpath)
  drive, rootpath = os.path.splitdrive(rootpath)
  if os.sep != '/':
    rootpath = rootpath.replace(os.sep, '/')
  rootpath = _quote(rootpath)
  path = _quote(path)
  if drive:
    url = 'file:///' + drive + rootpath + '/' + path
  else:
    url = 'file://' + rootpath + '/' + path
  return core.svn_path_canonicalize(url)
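Throughout these examples, _quote is typically bound with "from urllib.parse import quote as _quote" (Python 3) or "from urllib import quote as _quote" (Python 2). Here core.svn_path_canonicalize comes from the Subversion Python bindings, so the following sketch stubs that step out; the demo function name is made up:

import os
from urllib.parse import quote as _quote

def _rootpath2url_demo(rootpath, path):
    # Same drive-letter and path-separator handling as above, minus svn
    rootpath = os.path.abspath(rootpath)
    drive, rootpath = os.path.splitdrive(rootpath)
    if os.sep != '/':
        rootpath = rootpath.replace(os.sep, '/')
    prefix = 'file:///' + drive if drive else 'file://'
    return prefix + _quote(rootpath) + '/' + _quote(path)

# On POSIX this prints: file:///var/svn/repos/trunk/a%20b.txt
print(_rootpath2url_demo('/var/svn/repos', 'trunk/a b.txt'))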
Example #2
def _rootpath2url(rootpath, path):
    rootpath = os.path.abspath(rootpath)
    drive, rootpath = os.path.splitdrive(rootpath)
    if os.sep != "/":
        rootpath = rootpath.replace(os.sep, "/")
    rootpath = _quote(rootpath)
    path = _quote(path)
    if drive:
        url = "file:///" + drive + rootpath + "/" + path
    else:
        url = "file://" + rootpath + "/" + path
    return core.svn_path_canonicalize(url)
Example #3
def _get_file():
    """
    Helper that issues a HEAD request for the file on S3 to decide whether
    the download can be skipped. Returns False if the cached copy is still
    current (download skipped) and True if the file should be downloaded.
    """
    ret = __utils__['s3.query'](key=s3_key_kwargs['key'],
                                keyid=s3_key_kwargs['keyid'],
                                kms_keyid=s3_key_kwargs['keyid'],
                                method='HEAD',
                                bucket=bucket_name,
                                service_url=s3_key_kwargs['service_url'],
                                verify_ssl=s3_key_kwargs['verify_ssl'],
                                location=s3_key_kwargs['location'],
                                path=_quote(path),
                                local_file=cached_file_path,
                                full_headers=True,
                                path_style=s3_key_kwargs['path_style'],
                                https_enable=s3_key_kwargs['https_enable'])
    if ret:
        for header_name, header_value in ret['headers'].items():
            header_name = header_name.strip()
            header_value = header_value.strip()
            if str(header_name).lower() == 'last-modified':
                s3_file_mtime = datetime.datetime.strptime(
                    header_value, '%a, %d %b %Y %H:%M:%S %Z')
            elif str(header_name).lower() == 'content-length':
                s3_file_size = int(header_value)
        if cached_file_data['size'] == s3_file_size and \
                cached_file_data['mtime'] > s3_file_mtime:
            log.info(
                '%s - %s : %s skipped download since cached file size '
                'equals the S3 size and mtime is newer than the S3 mtime',
                bucket_name, saltenv, path)
            return False
    return True
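The Last-Modified parsing above relies on the RFC 1123 date format S3 returns; strptime's %Z accepts the 'GMT' suffix. A standalone check:

import datetime

mtime = datetime.datetime.strptime('Wed, 21 Oct 2015 07:28:00 GMT',
                                   '%a, %d %b %Y %H:%M:%S %Z')
print(mtime)  # 2015-10-21 07:28:00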
Example #4
def quote(string, safe='/'):
    """ Quote a string for use in URIs.
    """
    try:
        return _quote(string, safe)
    except UnicodeEncodeError:
        return string
Example #5
def quote(s, safe=b'/'):
    s = s.encode('utf-8') if isinstance(s, str) else s
    s = _quote(s, safe)
    # PY3 always returns unicode.  PY2 may return either, depending on whether
    # it had to modify the string.
    if isinstance(s, bytes):
        s = s.decode('utf-8')
    return s
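Assuming _quote is urllib.parse.quote, this wrapper accepts either str or bytes and always returns str on Python 3:

from urllib.parse import quote as _quote

print(quote('a b/c'))      # a%20b/c  ('/' survives because safe=b'/')
print(quote(b'\xc3\xa9'))  # %C3%A9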
Example #6
def quote(v):
    if isinstance(v, bool):
        return '1' if v else '0'
    if isinstance(v, (int, float)):
        return str(v)
    if isinstance(v, (str, bytes)):
        return _quote(v)
    if isinstance(v, datetime):
        return v.isoformat()
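Two details worth noting: the bool check must come before the int/float check, since bool is a subclass of int, and any unhandled type silently yields None. Assuming _quote is urllib.parse.quote and datetime is datetime.datetime:

from datetime import datetime
from urllib.parse import quote as _quote

print(quote(True))                  # 1
print(quote(3.14))                  # 3.14
print(quote('a b'))                 # a%20b
print(quote(datetime(2024, 1, 2)))  # 2024-01-02T00:00:00
print(quote([1, 2]))                # None (falls through)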
Example #7
def quote(text, *args, **kwargs):
	t = type(text)
	if t is str:
		converted_text = text.encode('utf-8')
	else:
		try:
			converted_text = str(text).encode('utf-8')
		except Exception:
			raise TypeError("Could not convert " + t.__name__ + " to str with utf-8 encoding")
	return _quote(converted_text, *args, **kwargs)
Example #8
def quote(text, *args, **kwargs):
    t = type(text)
    if t is str:
        converted_text = text.encode('utf-8')
    else:
        try:
            converted_text = str(text).encode('utf-8')
        except Exception:
            raise TypeError("Could not convert " + t.__name__ +
                            " to str with utf-8 encoding")
    return _quote(converted_text, *args, **kwargs)
Example #9
    def __init__(self, keyword, num_pages, url_queue):
        with self.__cntLock:
            if len(self.__instanceList) >= 1:
                raise RuntimeError(
                    'running more than 1 instance of ProcedurePageParser '
                    'at the same time is not supported'
                )
            self.__instanceList.append(self)

        self._urlQueue = url_queue
        self._numPages = int(num_pages)
        self._keyword = str(keyword)
        self._keywordQuoted = _quote(self._keyword)
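This __init__ guards against more than one live instance, but the class-level lock and instance list sit outside the extract. A runnable sketch of the assumed surrounding class (state names taken from the code above, everything else hypothetical):

import threading
from urllib.parse import quote as _quote

class ProcedurePageParser:
    # Class-level shared state; name-mangled to _ProcedurePageParser__*
    __cntLock = threading.Lock()
    __instanceList = []

    def __init__(self, keyword, num_pages, url_queue):
        with self.__cntLock:
            if len(self.__instanceList) >= 1:
                raise RuntimeError(
                    'running more than 1 instance of ProcedurePageParser '
                    'at the same time is not supported'
                )
            self.__instanceList.append(self)
        self._urlQueue = url_queue
        self._numPages = int(num_pages)
        self._keyword = str(keyword)
        self._keywordQuoted = _quote(self._keyword)

ProcedurePageParser('search term', 1, None)  # ok
ProcedurePageParser('another', 1, None)      # raises RuntimeError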
Example #10
def _geturl(self, path=None):
    if not path:
        return self.rootpath
    path = self.rootpath + '/' + _quote(path)
    return core.svn_path_canonicalize(path)
Example #11
def quote(data, safe=b''):
    if isinstance(data, str):
        data = data.encode('utf-8')
    return _quote(data, safe)
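With safe=b'' even '/' is percent-encoded, unlike urllib.parse.quote's default. Assuming _quote is urllib.parse.quote:

from urllib.parse import quote as _quote

print(quote('a/b c'))  # a%2Fb%20c
print(quote(b'\xff'))  # %FF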
Example #12
def quote(s, safe=b'/'):
    return to_unicode(_quote(to_bytes(s), safe))
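to_bytes and to_unicode are helpers from the surrounding project (this pattern matches w3lib/Scrapy); they behave roughly like this sketch:

def to_bytes(text, encoding='utf-8'):
    return text if isinstance(text, bytes) else text.encode(encoding)

def to_unicode(text, encoding='utf-8'):
    return text if isinstance(text, str) else text.decode(encoding)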
Example #13
def quote(value):
    value = value.encode('utf8', 'ignore') if isinstance(value, six.string_types) else str(value)
    return _quote(value, safe='')
Example #14
def url_escape(s: str) -> str:
    if s:
        s = _quote(s, safe=' ')
        s = s.replace(' ', '+')
    return s
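Quoting with safe=' ' and then swapping spaces for '+' reproduces urllib.parse.quote_plus for typical inputs:

from urllib.parse import quote as _quote, quote_plus

print(url_escape('a b&c'))  # a+b%26c
print(quote_plus('a b&c'))  # a+b%26c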
Example #15
def quote(value):
    value = value.encode("utf8", "ignore") if isinstance(value, six.string_types) else str(value)
    return _quote(value, safe="")
Example #16
def query(key, keyid, method='GET', params=None, headers=None,
          requesturl=None, return_url=False, bucket=None, service_url=None,
          path='', return_bin=False, action=None, local_file=None,
          verify_ssl=True, full_headers=False, kms_keyid=None,
          location=None, role_arn=None, chunk_size=16384, path_style=False,
          https_enable=True):
    """
    Perform a query against an S3-like API. This function requires that a
    secret key and the id for that key are passed in. For instance:

        s3.keyid: GKTADJGHEIQSXMKKRBJ08H
        s3.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs

    If keyid or key is not specified, an attempt to fetch them from the EC2
    IAM metadata service will be made.

    A service_url may also be specified in the configuration:

        s3.service_url: s3.amazonaws.com

    If a service_url is not specified, the default is s3.amazonaws.com. This
    may appear in various documentation as an "endpoint". A comprehensive list
    for Amazon S3 may be found at::

        http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region

    The service_url will form the basis for the final endpoint that is used to
    query the service.

    Path style can be enabled:

        s3.path_style: True

    This can be useful if you need to use Salt with a proxy for an
    S3-compatible storage service.

    You can use either https protocol or http protocol:

        s3.https_enable: True

    SSL verification may also be turned off in the configuration:

        s3.verify_ssl: False

    This is required if using S3 bucket names that contain a period, as
    these will not match Amazon's S3 wildcard certificates. Certificate
    verification is enabled by default.

    A region may be specified:

        s3.location: eu-central-1

    If region is not specified, an attempt to fetch the region from the EC2
    IAM metadata service will be made. Failing that, the default is us-east-1.
    """
    if not HAS_REQUESTS:
        log.error('There was an error: requests is required for s3 access')

    if not headers:
        headers = {}

    if not params:
        params = {}

    if not service_url:
        service_url = 's3.amazonaws.com'

    if not bucket or path_style:
        endpoint = service_url
    else:
        endpoint = '{0}.{1}'.format(bucket, service_url)

    if path_style and bucket:
        path = '{0}/{1}'.format(bucket, path)

    # Try grabbing the credentials from the EC2 instance IAM metadata if available
    if not key:
        key = hubblestack.utils.aws.IROLE_CODE

    if not keyid:
        keyid = hubblestack.utils.aws.IROLE_CODE

    if kms_keyid is not None and method in ('PUT', 'POST'):
        headers['x-amz-server-side-encryption'] = 'aws:kms'
        headers['x-amz-server-side-encryption-aws-kms-key-id'] = kms_keyid

    if not location:
        location = hubblestack.utils.aws.get_location()

    data = ''
    fh = None
    payload_hash = None
    if method == 'PUT':
        if local_file:
            payload_hash = hubblestack.utils.hashutils.get_hash(local_file, form='sha256')

    if path is None:
        path = ''
    path = _quote(path)

    if not requesturl:
        requesturl = ('https' if https_enable else 'http') + '://{0}/{1}'.format(endpoint, path)
        headers, requesturl = hubblestack.utils.aws.sig4(
            method,
            endpoint,
            params,
            data=data,
            uri='/{0}'.format(path),
            prov_dict={'id': keyid, 'key': key},
            role_arn=role_arn,
            location=location,
            product='s3',
            requesturl=requesturl,
            headers=headers,
            payload_hash=payload_hash,
        )

    log.debug('S3 Request: %s', requesturl)
    log.debug('S3 Headers:')
    log.debug('    Authorization: %s', headers['Authorization'])

    if not data:
        data = None

    try:
        if method == 'PUT':
            if local_file:
                fh = hubblestack.utils.files.fopen(local_file, 'rb')  # pylint: disable=resource-leakage
                data = fh.read()  # pylint: disable=resource-leakage
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        elif method == 'GET' and local_file and not return_bin:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      stream=True,
                                      timeout=300)
        else:
            result = requests.request(method,
                                      requesturl,
                                      headers=headers,
                                      data=data,
                                      verify=verify_ssl,
                                      timeout=300)
    finally:
        if fh is not None:
            fh.close()

    err_code = None
    err_msg = None
    if result.status_code >= 400:
        # On error the S3 API response should contain error message
        err_text = result.content or 'Unknown error'
        log.debug('    Response content: %s', err_text)

        # Try to get err info from response xml
        try:
            err_data = xml.to_dict(ET.fromstring(err_text))
            err_code = err_data['Code']
            err_msg = err_data['Message']
        except (KeyError, ET.ParseError) as err:
            log.debug(
                'Failed to parse s3 err response. %s: %s',
                type(err).__name__, err
            )
            err_code = 'http-{0}'.format(result.status_code)
            err_msg = err_text

    if os.environ.get('MOCK_SLOW_DOWN'):
        result.status_code = 503
        err_code = 'SlowDown'
        err_msg = 'MOCK_SLOW_DOWN environment variable set. All S3 queries will fail for testing purposes.'

    log.debug('S3 Response Status Code: %s', result.status_code)

    if method == 'PUT':
        if result.status_code != 200:
            if local_file:
                raise CommandExecutionError(
                    'Failed to upload from {0} to {1}. {2}: {3}'.format(
                        local_file, path, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to create bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if local_file:
            log.debug('Uploaded from %s to %s', local_file, path)
        else:
            log.debug('Created bucket %s', bucket)
        return None

    if method == 'DELETE':
        if not str(result.status_code).startswith('2'):
            if path:
                raise CommandExecutionError(
                    'Failed to delete {0} from bucket {1}. {2}: {3}'.format(
                        path, bucket, err_code, err_msg))
            raise CommandExecutionError(
                'Failed to delete bucket {0}. {1}: {2}'.format(
                    bucket, err_code, err_msg))

        if path:
            log.debug('Deleted %s from bucket %s', path, bucket)
        else:
            log.debug('Deleted bucket %s', bucket)
        return None

    sortof_ok = ['SlowDown', 'ServiceUnavailable', 'RequestTimeTooSkewed',
                 'RequestTimeout', 'OperationAborted', 'InternalError',
                 'AccessDenied']

    # This can be used to save a binary object to disk
    if local_file and method == 'GET':
        if result.status_code < 200 or result.status_code >= 300:
            if err_code in sortof_ok:
                log.error('Failed to get file=%s. %s: %s', path, err_code, err_msg)
                return None
            raise CommandExecutionError(
                'Failed to get file={0}. {1}: {2}'.format(path, err_code, err_msg))

        log.debug('Saving to local file: %s', local_file)
        with hubblestack.utils.files.fopen(local_file, 'wb') as out:
            for chunk in result.iter_content(chunk_size=chunk_size):
                out.write(chunk)
        return 'Saved to local file: {0}'.format(local_file)

    if result.status_code < 200 or result.status_code >= 300:
        if err_code in sortof_ok:
            log.error('Failed s3 operation. %s: %s', err_code, err_msg)
            return None
        raise CommandExecutionError(
            'Failed s3 operation. {0}: {1}'.format(err_code, err_msg))

    # This can be used to return a binary object wholesale
    if return_bin:
        return result.content

    if result.content:
        items = ET.fromstring(result.content)

        ret = []
        for item in items:
            ret.append(xml.to_dict(item))

        if return_url is True:
            return ret, requesturl
    else:
        if result.status_code != requests.codes.ok:
            return None
        ret = {'headers': []}
        if full_headers:
            ret['headers'] = dict(result.headers)
        else:
            for header in result.headers:
                ret['headers'].append(header.strip())

    return ret
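A hypothetical call that HEADs an object and inspects the returned headers (the credentials are the docstring's dummy values; bucket and path are made up):

ret = query(key='askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs',
            keyid='GKTADJGHEIQSXMKKRBJ08H',
            method='HEAD',
            bucket='my-bucket',
            path='path/to/object.txt',
            full_headers=True)
if ret:
    print(ret['headers'].get('Content-Length'))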
Example #17
def quote(string):
    return _quote(string, safe="@")
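With safe="@" the '@' sign survives quoting but '/' (safe by default) is now encoded. Assuming _quote is urllib.parse.quote:

from urllib.parse import quote as _quote

print(quote('user@host/path'))  # user@host%2Fpath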
Example #18
def quote(string, *args, **kwargs):
    return _quote(string.decode('charmap'), *args, **kwargs).encode('charmap')
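The charmap codec maps bytes 0-255 one-to-one onto code points, an idiom from Python 2 where urllib.quote required a byte string. Note that Python 3's urllib.parse.quote UTF-8-encodes non-ASCII code points before escaping, so on Python 3 (assuming _quote is urllib.parse.quote):

from urllib.parse import quote as _quote

print(quote(b'a b/\xff'))  # b'a%20b/%C3%BF'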
Example #19
def c_print_as_str(s: str, col: str) -> str:  # signature inferred from the c_print call below
    _map = {
        "OKBLUE": "\u001b[94m",
        "OKGREEN": "\u001b[92m",
        "WARNING": "\u001b[93m",
        "FAIL": "\u001b[91m",
        "BOLD": "\u001b[1m",
        "UNDERLINE": "\u001b[4m",
    }
    _ENDC = "\u001b[0m"
    color = _map.get(col.upper(), "")
    return f"{color}{s}{_ENDC}"


def c_print(s: str, c: str) -> None:
    return print(c_print_as_str(s, c))


def check_password_hash(_hash: str, pw: str) -> bool:
    meth = pwhash.pbkdf2_sha512
    return meth.verify(pw, _hash)


def hash_password(pw: str) -> str:
    meth = pwhash.pbkdf2_sha512
    return meth.hash(pw)


now = lambda: time.time()
quote = lambda x: _quote(x, safe="")

get_site_host = lambda x: urlparse(x).netloc
Example #20
def quote(data, safe=b''):
    if isinstance(data, str):
        data = data.encode('utf-8')
    return _quote(data, safe)
Example #21
def _get_file_from_s3(metadata, saltenv, bucket_name, path, cached_file_path):
    """
    Checks the local cache for the file; if it's old or missing, grab the
    file from S3 and update the cache
    """
    s3_key_kwargs = _get_s3_key()

    def _get_file():
        """
        Helper function that gets the file from S3 and checks if it can be skipped.
        Returns True if the file was downloaded and False if the download was skipped.
        """
        ret = __utils__['s3.query'](key=s3_key_kwargs['key'],
                                    keyid=s3_key_kwargs['keyid'],
                                    kms_keyid=s3_key_kwargs['keyid'],
                                    method='HEAD',
                                    bucket=bucket_name,
                                    service_url=s3_key_kwargs['service_url'],
                                    verify_ssl=s3_key_kwargs['verify_ssl'],
                                    location=s3_key_kwargs['location'],
                                    path=_quote(path),
                                    local_file=cached_file_path,
                                    full_headers=True,
                                    path_style=s3_key_kwargs['path_style'],
                                    https_enable=s3_key_kwargs['https_enable'])
        if ret:
            for header_name, header_value in ret['headers'].items():
                header_name = header_name.strip()
                header_value = header_value.strip()
                if str(header_name).lower() == 'last-modified':
                    s3_file_mtime = datetime.datetime.strptime(
                        header_value, '%a, %d %b %Y %H:%M:%S %Z')
                elif str(header_name).lower() == 'content-length':
                    s3_file_size = int(header_value)
            if cached_file_data['size'] == s3_file_size and \
                    cached_file_data['mtime'] > s3_file_mtime:
                log.info(
                    '%s - %s : %s skipped download since cached file size '
                    'equals the S3 size and mtime is newer than the S3 mtime',
                    bucket_name, saltenv, path)
                return False
        return True

    # check the local cache...
    if os.path.isfile(cached_file_path):
        file_meta = _find_file_meta(metadata, bucket_name, saltenv, path)
        if file_meta:
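            # Note: ETags containing '-' come from multipart uploads and are
            # not plain MD5 digests, so the hash comparison below only applies
            # to single-part files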
            if file_meta['ETag'].find('-') == -1:
                cached_md5 = hubblestack.utils.hashutils.get_hash(
                    cached_file_path, 'md5')

                # hashes match we have a cache hit
                if cached_md5 == file_meta['ETag']:
                    return
            else:
                cached_file_stat = os.stat(cached_file_path)
                cached_file_data = {
                    'size': cached_file_stat.st_size,
                    'mtime': datetime.datetime.fromtimestamp(
                        cached_file_stat.st_mtime),
                    'lastmod': datetime.datetime.strptime(
                        file_meta['LastModified'], '%Y-%m-%dT%H:%M:%S.%fZ')
                }

                if (cached_file_data['size'] == int(file_meta['Size'])
                        and cached_file_data['mtime'] >
                        cached_file_data['lastmod']):
                    log.debug('cached file size equals metadata size and '
                              'cached file mtime is later than metadata last '
                              'modification time.')
                    if not _get_file():
                        # skipped download
                        return

    # ... or get the file from S3
    __utils__['s3.query'](
        key=s3_key_kwargs['key'],
        keyid=s3_key_kwargs['keyid'],
        kms_keyid=s3_key_kwargs['keyid'],
        bucket=bucket_name,
        service_url=s3_key_kwargs['service_url'],
        verify_ssl=s3_key_kwargs['verify_ssl'],
        location=s3_key_kwargs['location'],
        path=_quote(path),
        local_file=cached_file_path,
        path_style=s3_key_kwargs['path_style'],
        https_enable=s3_key_kwargs['https_enable'],
    )
Example #22
def quote(*args, **kwargs):
    return _quote(*args, safe='', **kwargs)
Example #23
def quote(s):
    quoted = _quote(s)
    if isinstance(quoted, str):
        quoted = quoted.encode("ascii")
    return quoted
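This wrapper pins the return type to ASCII bytes; on Python 3, urllib.parse.quote itself returns str:

from urllib.parse import quote as _quote

print(quote('a b'))  # b'a%20b'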
Example #24
def quote(value, safe='/'):
    if isinstance(value, text_type):
        (value, _len) = utf8_encoder(value, 'replace')
    (valid_utf8_str, _len) = utf8_decoder(value, 'replace')
    return _quote(valid_utf8_str.encode('utf-8'), safe)
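text_type, utf8_encoder, and utf8_decoder come from the surrounding project (this snippet matches a pattern used in OpenStack Swift); roughly:

import codecs

text_type = str                            # six.text_type on Python 3
utf8_encoder = codecs.getencoder('utf-8')  # str -> (bytes, length)
utf8_decoder = codecs.getdecoder('utf-8')  # bytes -> (str, length)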
Example #25
def quote(string, *args, **kwargs):
    return _quote(string.decode('charmap'), *args,
                  **kwargs).encode('charmap')
Example #26
def quote(value):
    value = value.encode('utf8', errors='ignore') if isinstance(
        value, six.string_types) else str(value)
    return _quote(value, safe='')
Example #27
def uq(step):
    return _quote(step, safe='')
Example #28
def batch(self, requests=None, parallel=None, breakOnError=None):
    # type: (list[str], bool, bool) -> dict

    """A batch request is a collection of sub-requests that enables developers to send multiple API calls with one HTTP request.

    :param requests: A list of sub-request URLs. Each request will be executed and the responses of each one will be returned in the payload.
    :type requests: list of str

    :param parallel: By default, each sub-request is processed on the servers sequentially. If this is set to ``True``, then all sub-requests are processed at the same time, without waiting for the previous sub-request to finish before the next one is started.
    :type parallel: bool

    :param breakOnError: If this is set to ``True``, one sub-request failure will cause the entire batch to stop processing subsequent sub-requests and return a value of ``"false"`` for success.
    :type breakOnError: bool

    .. note::
       - The maximum number of sub-requests in one batch request is 50.
       - Dump format is not supported in batch calls.
       - The ``parallel`` and ``breakOnError`` parameters cannot be used in the same request.

    .. code-block:: python

       # Disable request submitting to get URLs from methods
       api.submitRequests = False

       # Generate list of request URLs
       requests = [
           api.usersFetch(),
           api.sessionsCheck(),
           api.scoresTables(),
           api.trophiesFetch(),
           api.dataStoreGetKeys("*", globalData=True),
           api.friends(),
           api.time()
       ]

       # Enable request submitting again
       api.submitRequests = True

       # Submit batch request and get all results
       result = api.batch(requests=requests)

    """

    # Use None instead of a shared mutable default argument
    if requests is None:
        requests = []

    if parallel is not None and breakOnError is not None:
        raise GameJoltDataCollision(["parallel", "break_on_error"])

    for i in range(len(requests)):
        requests[i] = requests[i].replace(self.__API_URL, "")
        requests[i] = requests[i].split("&signature=")[0]
        requests[i] += "&signature=" + _md5((requests[i] + self.privateKey).encode()).hexdigest()
        requests[i] = _quote(requests[i].replace(self.__API_URL, ""), safe="")

    # Required data
    data = {
        "game_id" : self.gameId,
        "requests" : requests if len(requests) > 0 else None
    }

    # Optional data
    optionalData = {
        "parallel" : self._processBoolean(parallel),
        "break_on_error" : self._processBoolean(breakOnError)
    }

    self._validateRequiredData(data)
    data.update(self._getValidData(optionalData))

    return self._submit(self.operations["batch"], data)