Beispiel #1
0
    async def get_users(self, *, user_ids=None, logins=None):
        """Fetch users by id and/or login, batching requests.

        The ``/users`` endpoint accepts at most 100 ``id`` and 100 ``login``
        values per call, so both inputs are split into chunks of 100 and one
        request is issued per chunk pair.

        Args:
            user_ids: Optional sequence of user ids to look up.
            logins: Optional sequence of login names to look up.

        Returns:
            A list with one response per request made.
        """
        route = HTTPRoute('GET', '/users')

        def _chunks(seq, size=100):
            # Split *seq* into consecutive slices of at most *size* items.
            return [seq[i:i + size] for i in range(0, len(seq), size)]

        # BUG FIX: the previous implementation appended the first 100-item
        # slice and then re-sliced that slice, silently dropping every item
        # past the first 100.
        id_chunks = _chunks(list(user_ids)) if user_ids else []
        login_chunks = _chunks(list(logins)) if logins else []

        responses = []
        # zip_longest pads the shorter list with None so no chunk is lost.
        for ids, logs in zip_longest(id_chunks, login_chunks):
            params = MultiDict()
            if ids:
                for user_id in ids:
                    params.add('id', user_id)
            if logs:
                for login in logs:
                    params.add('login', login)
            responses.append(await self.request(route, params=params))

        return responses
Beispiel #2
0
def get_multi_dict_from_python_dict(resp_headers_dict: dict) -> MultiDictProxy:
    """Build an immutable :class:`aiohttp.MultiDictProxy` from a plain dict.

    Note: For now, this method is used for test only.

    .. note::

        Plain dictionaries and JSON cannot express multi-value keys, while
        `aiohttp` response headers are immutable :class:`aiohttp.MultiDictProxy`
        instances (and `aiohttpretty` returns :class:`aiohttp.MultiDict`).
        Tests therefore build a mutable :class:`aiohttp.MultiDict` and return
        the proxy type to imitate `aiohttp`'s behavior.

    :param dict resp_headers_dict: the raw response headers dictionary
    :rtype: :class:`aiohttp.MultiDictProxy`
    """

    headers = MultiDict(resp_headers_dict)
    raw_hash = headers.get('x-goog-hash', None)
    if raw_hash:
        # The combined hash header is validated, then split back into one
        # entry per hash value.
        assert verify_raw_google_hash_header(raw_hash)
        headers.pop('x-goog-hash')
        for piece in raw_hash.split(','):
            headers.add('x-goog-hash', piece)

    return MultiDictProxy(headers)
Beispiel #3
0
    async def search(
        self,
        keyword: str,
        *,
        type_of_content: str = None,
        provider_ids: List[int] = None,
        provider_names: List[str] = None,
        launch: bool = False,
    ) -> None:
        """Run a content search against the device's browse endpoint."""
        # MultiDict allows the repeated provider keys the endpoint expects.
        params = MultiDict({"keyword": keyword})

        if type_of_content:
            params["type"] = type_of_content

        for provider_id in provider_ids or []:
            params.add('provider-id', str(provider_id))

        for provider_name in provider_names or []:
            params.add('provider', provider_name)

        if launch:
            params["launch"] = "true"

        await self._request("search/browse", method="POST", params=params)
Beispiel #4
0
    async def tags(self,
                   ns: Union[str, Sequence[str]] = None,
                   ids: bool = False) -> List[str]:
        """Get a list of the tags currently associated with registered devices.

        Args:
            ns: The tag namespace(s) to use when searching for tags.
                (default: default)
            ids: Include ID tags in the response. (default: false)

        Returns:
            The Synse v3 API tags response.

        See Also:
            https://synse.readthedocs.io/en/latest/server/api.v3/#tags
        """

        params = MultiDict()
        if ns:
            # ``ns`` may be a single namespace or a sequence of them. The
            # previous code added a whole sequence as one query value, which
            # serializes incorrectly; add one 'ns' parameter per namespace.
            if isinstance(ns, str):
                params.add('ns', ns)
            else:
                for namespace in ns:
                    params.add('ns', namespace)
        if ids:
            params.add('ids', 'true')

        response = await self.make_request(
            url=f'{self.url}/tags',
            method=GET,
            params=params,
        )
        return models.make_response(
            None,
            response,
        )
Beispiel #5
0
async def ask_queries(reader, writer):
    """
    This set of commands seems to elicit all of the unique information I can get out of
    my receiver.  (Denon AVR-S730H)

    :param reader:
    :param writer:
    :return: MultiDict mapping each response line to the command that produced it
    """

    commands = [
        "PW?", "MV?", "CV?", "MU?", "SI?",
        "ZM?", "SV?", "SD?", "SLP?", "MS?",
        "MSQUICK ?", "PSLOM ?",
        "PSMULTEQ: ?", "PSDYNEQ ?", "PSREFLEV ?", "PSDYNVOL ?",
        "PSEFF ?", "PSDEL ?", "PSSWR ?", "PSRSTR ?",
        "Z2?", "Z2MU?", "Z2SLP?", "Z2QUICK ?", "TMAN?",
    ]

    facts = MultiDict()
    for cmd in commands:
        # Commands are terminated with a carriage return per the protocol.
        writer.write(cmd.encode("ascii") + b"\r")
        for line in await read_lines_until(reader, 0.1):
            facts.add(line.strip(), cmd)
    return facts
Beispiel #6
0
    def post(self):
        """Return POST parameters.

        Lazily reads and parses the request body into an immutable
        :class:`MultiDictProxy`; the result is cached on ``self._post`` so
        subsequent calls are free.  Non-form content types (and non-POST
        methods) yield an empty mapping.
        """
        if self._post is not None:
            return self._post
        # Methods that cannot carry a form body get an empty mapping.
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        # Only form payloads are parsed here; anything else (JSON, raw
        # bytes, ...) is left to the caller.
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            self._post = MultiDictProxy(MultiDict())
            return self._post

        body = yield from self.read()
        content_charset = self.charset or 'utf-8'

        # cgi.FieldStorage expects a CGI-style environment describing the
        # request; QUERY_STRING is empty because only the body is parsed.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}

        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)

        # Decoders for the transfer encodings handled inline below.
        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }

        out = MultiDict()
        _count = 1
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                # File upload: wrap in FileField and keep the raw field in a
                # cache so its file object can be reached (and closed) later.
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                # _count keeps cache keys unique when field names repeat.
                self._post_files_cache[field.name+str(_count)] = field
                _count += 1
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)

        self._post = MultiDictProxy(out)
        return self._post
Beispiel #7
0
    def post(self):
        """Return POST parameters.

        Lazily reads and parses the request body into an immutable
        :class:`MultiDictProxy`; the result is cached on ``self._post`` so
        subsequent calls are free.  Non-form content types (and non-POST
        methods) yield an empty mapping.
        """
        if self._post is not None:
            return self._post
        # Methods that cannot carry a form body get an empty mapping.
        if self.method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        # Only form payloads are parsed here; anything else (JSON, raw
        # bytes, ...) is left to the caller.
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            self._post = MultiDictProxy(MultiDict())
            return self._post

        body = yield from self.read()
        content_charset = self.charset or 'utf-8'

        # cgi.FieldStorage expects a CGI-style environment describing the
        # request; QUERY_STRING is empty because only the body is parsed.
        environ = {'REQUEST_METHOD': self.method,
                   'CONTENT_LENGTH': str(len(body)),
                   'QUERY_STRING': '',
                   'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}

        fs = cgi.FieldStorage(fp=io.BytesIO(body),
                              environ=environ,
                              keep_blank_values=True,
                              encoding=content_charset)

        # Decoders for the transfer encodings handled inline below.
        supported_transfer_encoding = {
            'base64': binascii.a2b_base64,
            'quoted-printable': binascii.a2b_qp
        }

        out = MultiDict()
        _count = 1
        for field in fs.list or ():
            transfer_encoding = field.headers.get(
                hdrs.CONTENT_TRANSFER_ENCODING, None)
            if field.filename:
                # File upload: wrap in FileField and keep the raw field in a
                # cache so its file object can be reached (and closed) later.
                ff = FileField(field.name,
                               field.filename,
                               field.file,  # N.B. file closed error
                               field.type)
                if self._post_files_cache is None:
                    self._post_files_cache = {}
                # _count keeps cache keys unique when field names repeat.
                self._post_files_cache[field.name+str(_count)] = field
                _count += 1
                out.add(field.name, ff)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    # binascii accepts bytes
                    value = value.encode('utf-8')
                    value = supported_transfer_encoding[
                        transfer_encoding](value)
                out.add(field.name, value)

        self._post = MultiDictProxy(out)
        return self._post
Beispiel #8
0
def test_multidict():
    # Line order in pformat's rendering of a MultiDict is not guaranteed,
    # so compare the set of rendered lines instead of the raw string.
    md = MultiDict({'a': 1, 'b': 2})
    md.add('b', 3)
    rendered = pformat(md)
    expected = {
        "<MultiDict({",
        "    'a': 1,",
        "    'b': 2,",
        "    'b': 3,",
        "})>",
    }
    assert set(rendered.split('\n')) == expected
Beispiel #9
0
def decode_metadata(headers):
    """Decode gRPC metadata from an iterable of ``(key, value)`` header pairs.

    HTTP/2 pseudo-headers (``:...``), reserved ``grpc-`` keys and the keys in
    ``_SPECIAL`` are skipped.  Values of ``-bin`` keys arrive as unpadded
    base64 and are decoded back to bytes.

    :param headers: iterable of (key, value) string pairs
    :return: ``MultiDict`` of decoded metadata
    """
    metadata = MultiDict()
    for key, value in headers:
        if key.startswith((':', 'grpc-')) or key in _SPECIAL:
            continue
        elif key.endswith('-bin'):
            # Restore the stripped base64 padding.  BUG FIX: the old
            # ``len(value) % 4`` formula added three '=' where one was
            # required (len % 4 == 3), producing invalid base64; the
            # correct pad count is ``(-len) % 4``.
            pad = b'=' * (-len(value) % 4)
            metadata.add(key, b64decode(value.encode('ascii') + pad))
        else:
            metadata.add(key, value)
    return metadata
Beispiel #10
0
def remove_from_multidict(d: MultiDict, key: str, item: Any):
    """
    Removes an item from a multidict key.
    """
    # Pop every value stored under ``key``, drop the first occurrence of
    # ``item`` (if present), then re-add the survivors in order.
    values = d.popall(key, [])
    try:
        values.remove(item)
    except ValueError:
        pass

    for value in values:
        d.add(key, value)

    return d
Beispiel #11
0
    async def handle(self, params: MultiDict) -> HTTPResponse:
        """Handle an OAuth2 endpoint request and build the HTTP response.

        Filters out valueless parameters, authenticates the end user and the
        client, binds the remaining parameters to the endpoint's request
        dataclass, and converts failures into OAuth2/HTTP error responses.
        """
        http_request = self.request

        # Drop parameters sent without a value, per RFC 6749 sec. 3.1
        # (parameters without a value must be treated as omitted).
        # NOTE(review): the original comment said "RFC 6794" — a typo.
        new_params = MultiDict()
        for k, v in params.items():
            if v:
                new_params.add(k, v)
        params = new_params

        logger.debug('Request params: %s', params)

        # Create context
        context = Context(http_request=http_request, http_params=params)

        try:
            # Authenticate end user
            context.owner = await authenticate_end_user(http_request, params)

            # Authenticate client
            context.client = await authenticate_client(http_request, params)

            # Get request class
            request_class = self.get_request_class(params)

            # Create OAuth2 request, keeping only those parameters that are
            # actual dataclass fields of the request class.
            kwargs = {}
            request_fields = {f.name for f in fields(request_class)}
            for k, v in params.items():
                if k in request_fields:
                    kwargs[k] = v
            context.oauth2_request = request_class(**kwargs)

            # Prepare response
            oauth2_response = await self.create_stream(context)

        except HTTPException as exc:
            # HTTP exceptions double as responses and are returned as-is.
            return exc

        except HTTPRequestError as exc:
            raise HTTPBadRequest(reason=str(exc.args[0]))

        except OAuth2Error as exc:
            # Protocol-level errors are rendered as regular responses below.
            oauth2_response = exc

        except Exception as exc:  # pylint: disable=broad-except
            logger.error('%s: %s', type(exc), exc, exc_info=exc)
            oauth2_response = ServerError('Unexpected server error, '
                                          'please try later.')

        return self.create_response(context, oauth2_response)
Beispiel #12
0
def parse_content_type(ctype: str) -> 'ContentType':
    """Parse a Content-Type header value into type, subtype and parameters.

    :param ctype: raw header value, e.g. ``text/html; charset=utf-8``
    :return: a ``ContentType`` instance.  (FIX: the previous ``-> str``
        annotation was wrong — the function never returns a string.)
    """
    parts = ctype.split(';')
    params = MultiDict()
    for item in parts[1:]:
        if not item:
            continue
        # ``key=value`` parameter; a bare token becomes a key with an
        # empty value.  Only the first '=' splits, so values may contain '='.
        key, value = item.split('=', 1) if '=' in item else (item, '')
        params.add(key.lower().strip(), value.strip(' "'))
    fulltype = parts[0].strip().lower()
    if fulltype == '*':
        # Bare '*' is shorthand for the full wildcard media type.
        fulltype = '*/*'
    mtype, stype = fulltype.split('/', 1) if '/' in fulltype else (fulltype,
                                                                   '')
    return ContentType(type=mtype, subtype=stype, parameters=params)
Beispiel #13
0
    async def _collect_strategy(self, target, strategy):
        """Fetch PageSpeed results for *target*/*strategy* and record metrics."""
        # Build the query as (key, value) pairs; MultiDict keeps the
        # repeated "category" keys the API expects.
        pairs = [("category", category) for category in self.CATEGORIES]
        pairs += [("url", target), ("strategy", strategy)]
        if self._apikey:
            pairs.append(("key", self._apikey))
        query_params = MultiDict(pairs)

        try:
            async with self._aiohttp_client.get(self.API_URL,
                                                params=query_params) as resp:
                data = await resp.json()
        except asyncio.TimeoutError:
            self.logger.error("PageSpeed API timeout :(")
            return

        if "error" in data:
            self.logger.error(
                "PageSpeed API Error: code %s, %s",
                data["error"]["code"],
                data["error"]["message"],
            )
            return

        labels = {"strategy": strategy}

        self._handle_lighthouse(data["lighthouseResult"], labels)

        # Both page-level and origin-level loading experience blocks are
        # optional and only processed when they carry metrics.
        for lexp_type in ("loadingExperience", "originLoadingExperience"):
            if lexp_type in data and "metrics" in data[lexp_type]:
                self._handle_loading_experience(
                    data[lexp_type],
                    labels,
                    origin=lexp_type.startswith("origin"))
def cli(solr, cluster):
    """Collect Solr segment metrics and print them as Prometheus samples.

    Args:
        solr: Base URL of the Solr instance (including trailing slash).
        cluster: Cluster label attached to every per-segment sample.
    """
    # Prometheus collectors keyed by label string; MultiDict lets several
    # segments share an identical label set.
    segments = MultiDict()
    deleted_documents = MultiDict()
    documents = MultiDict()
    size_bytes = MultiDict()  # FIX: was named `bytes`, shadowing the builtin
    age = MultiDict()

    # query solr for collection metrics to get list of collections available
    response = requests.get(
        solr + 'admin/metrics?group=core&prefix=QUERY./select.errors')
    errors = response.json()

    # sample segment information for each collection and add to collectors
    for key in errors['metrics']:

        # get the name of the collection
        collection = re.sub(r'solr\.core\.(.+)\.shard.*.replica_.*', r'\1',
                            key)

        # place a call for segment information
        response = requests.get(solr + collection + '/admin/segments?wt=json')
        segment_info = response.json()

        segment_label = "collection=\"%s\"" % (collection)

        segments[segment_label] = len(segment_info['segments'])

        for segment in segment_info['segments'].values():
            mergeCandidate = str(segment.get('mergeCandidate', False))
            source = segment['source']

            common_labels = "cluster=\"%s\",collection=\"%s\",source=\"%s\",mergeCandidate=\"%s\"" % (
                cluster, collection, source, mergeCandidate)

            # set samples
            deleted_documents.add(common_labels, segment['delCount'])
            documents.add(common_labels, segment['size'])
            size_bytes.add(common_labels, segment['sizeInBytes'])

            # set age
            created_at = dateutil.parser.parse(segment['age'])
            now = datetime.now(timezone.utc)
            age.add(common_labels, (now - created_at).total_seconds())

    print_prometheus_gauge('segments_total',
                           'total number of segments for the collection',
                           segments)
    print_prometheus_statistics(
        'segment_deleted_documents_total',
        'total number of deleted documents in a segment', deleted_documents)
    print_prometheus_statistics('segment_documents_total',
                                'total number of documents in a segment',
                                documents)
    print_prometheus_statistics('segment_bytes_total',
                                'total number of bytes in a segment',
                                size_bytes)
    print_prometheus_statistics(
        'segment_age_seconds', 'age of a segment in seconds comparing to now',
        age)
 def function1732(self):
     """Return POST parameters.

     Reads the request body and parses it into a ``MultiDictProxy``:
     multipart bodies are consumed part by part (file parts are spooled to
     a temporary file), urlencoded bodies are parsed with ``parse_qsl``.
     The result is cached on ``self.attribute615``.

     :raises ValueError: if the accumulated body exceeds the configured
         maximum size (``self.attribute1376``).
     """
     if self.attribute615 is not None:
         return self.attribute615
     if self.attribute1016 not in self.var2369:
         # Methods that cannot carry a form body get an empty mapping.
         self.attribute615 = MultiDictProxy(MultiDict())
         return self.attribute615
     content_type = self.var1401
     if content_type not in ('', 'application/x-www-form-urlencoded', 'multipart/form-data'):
         self.attribute615 = MultiDictProxy(MultiDict())
         return self.attribute615
     out = MultiDict()
     if content_type == 'multipart/form-data':
         reader = yield from self.function942()
         part = yield from reader.next()
         while part is not None:
             size = 0
             max_size = self.attribute1376
             part_ctype = part.function249.get(hdrs.CONTENT_TYPE)
             if part.filename:
                 # File upload: spool decoded chunks to a temporary file.
                 tmp = tempfile.TemporaryFile()
                 chunk = yield from part.read_chunk(size=(2 ** 16))
                 # BUG FIX: the loop condition previously tested the
                 # undefined name ``chunk`` while reading into a different
                 # variable, raising NameError on any file upload.
                 while chunk:
                     chunk = part.decode(chunk)
                     tmp.write(chunk)
                     size += len(chunk)
                     if (max_size > 0) and (size > max_size):
                         raise ValueError('Maximum request body size exceeded')
                     chunk = yield from part.read_chunk(size=(2 ** 16))
                 tmp.seek(0)
                 field = var1064(part.name, part.filename, tmp, part_ctype, part.function249)
                 out.add(part.name, field)
             else:
                 value = yield from part.function976(decode=True)
                 # Text parts are decoded with the part's charset.
                 if (part_ctype is None) or part_ctype.startswith('text/'):
                     charset = part.get_charset(default='utf-8')
                     value = value.decode(charset)
                 out.add(part.name, value)
                 size += len(value)
                 if (max_size > 0) and (size > max_size):
                     raise ValueError('Maximum request body size exceeded')
             part = yield from reader.next()
     else:
         body = yield from self.function976()
         if body:
             charset = (self.charset or 'utf-8')
             out.extend(parse_qsl(body.rstrip().decode(charset), keep_blank_values=True, encoding=charset))
     self.attribute615 = MultiDictProxy(out)
     return self.attribute615
Beispiel #16
0
def _rewrite_files_paths(request, query=None):
    """
    Rewrites paths in requests to use the MFS user root.

    The 'arg'/'arg2' parameters of MFS calls carry filesystem paths; every
    such path (except immutable ``/ipfs/`` paths) is prefixed with the
    base64-encoded username so each user operates in a private root.

    :raises ValueError: if a path value is empty or does not start with a slash
    """
    query = query if query is not None else request.query
    username, _ = get_user_password(request)
    username_b64 = base64.b64encode(username.encode('utf-8')).decode('utf-8')
    new_query = MultiDict()
    for key, val in query.items():
        if key in ['arg', 'arg2'] and not val.startswith('/ipfs/'):
            # FIX: an empty value previously raised IndexError on val[0];
            # treat it as an invalid (non-absolute) path instead, and give
            # the ValueError a message.
            if not val or val[0] not in ['/', '\\']:
                raise ValueError('paths must start with a leading slash')
            new_path = f'/{username_b64}{os.path.normpath(val)}'
            new_query.add(key, new_path)
        else:
            new_query.add(key, val)
    return new_query
def parse_mimetype(mimetype: str) -> MimeType:
    """Parses a MIME type into its components.

    mimetype is a MIME type string.

    Returns a MimeType object.

    Example:

    >>> parse_mimetype('text/html; charset=utf-8')
    MimeType(type='text', subtype='html', suffix='',
             parameters={'charset': 'utf-8'})

    """
    if not mimetype:
        # Empty input maps to an all-empty MimeType.
        return MimeType(
            type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict())
        )

    segments = mimetype.split(";")
    params = MultiDict()  # type: MultiDict[str]
    for raw in segments[1:]:
        if not raw:
            continue
        if "=" in raw:
            name, value = cast(Tuple[str, str], raw.split("=", 1))
        else:
            name, value = raw, ""
        params.add(name.lower().strip(), value.strip(' "'))

    fulltype = segments[0].strip().lower()
    if fulltype == "*":
        # Bare '*' is shorthand for the full wildcard media type.
        fulltype = "*/*"

    if "/" in fulltype:
        mtype, stype = cast(Tuple[str, str], fulltype.split("/", 1))
    else:
        mtype, stype = fulltype, ""
    if "+" in stype:
        stype, suffix = cast(Tuple[str, str], stype.split("+", 1))
    else:
        suffix = ""

    return MimeType(
        type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params)
    )
Beispiel #18
0
    def __init__(self, session: "libkol.Session",
                 user_id: Union[int, List[int]]) -> None:
        """Build a clan-member boot request for one or more user ids."""
        super().__init__(session)

        # Normalise a single id to a list so both cases share one code path.
        targets = [user_id] if isinstance(user_id, int) else user_id

        # MultiDict allows the duplicate "pids[]" keys the endpoint expects.
        form = MultiDict({"action": "modify", "begin": 1})
        for uid in targets:
            form.add("pids[]", uid)
            form.add("boot{}".format(uid), "on")

        self.request = session.request("clan_members.php",
                                       pwd=True,
                                       params=form)
async def run(*,
              ws_addr: str,
              market_data_callback: MarketDataCallback = None,
              order_book_callback: OrderBookCallback = None,
              trades_callback: TradesCallback = None,
              trade_pairs: List[str] = None,
              loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
    """Connect to the websocket feed and dispatch messages to the callbacks."""
    url = ws_addr
    if trade_pairs:
        # MultiDict keeps one 'trade_pair' query key per requested pair.
        query = MultiDict()
        for pair in trade_pairs:
            query.add('trade_pair', pair)
        url = '{}?{}'.format(url, urlencode(query))
    async with aiohttp.ClientSession(loop=loop) as session:
        socket_ctx = session.ws_connect(url, receive_timeout=20, heartbeat=8)
        async with socket_ctx as ws:
            await reader_loop(ws, market_data_callback, order_book_callback,
                              trades_callback)
Beispiel #20
0
def parse_uri(uri):
    '''Parse a SIP URI into the scheme/userinfo/hostport/parameters/headers.'''
    match = uri_re.match(uri)
    if match is None:
        raise ValueError(f"'{uri}' is not a valid SIP URI")
    groups = match.groupdict()

    # Parameters arrive as ';'-separated `key=val` pairs.
    parameters = {}
    raw_params = groups.get('parameters')
    for pair in (raw_params.split(';') if raw_params else []):
        pieces = pair.split('=')
        if len(pieces) != 2:
            raise ValueError('parameters must be formatted as `key=[val]`')
        parameters[pieces[0]] = pieces[1]

    # Headers arrive as '&'-separated `key=val` pairs and may repeat,
    # hence the MultiDict.
    headers = MultiDict()
    raw_headers = groups.get('headers')
    for pair in (raw_headers.split('&') if raw_headers else []):
        pieces = pair.split('=')
        if len(pieces) != 2:
            raise ValueError('headers must be formatted as `key=[val]`')
        headers.add(pieces[0], pieces[1])

    return URIParseResult(
        scheme=groups.get('scheme'),
        userinfo=groups.get('userinfo', None),
        hostport=groups.get('hostport'),
        parameters=parameters,
        headers=headers,
    )
Beispiel #21
0
async def _get_multipart_params(request):
    """Extract a mapping of parts sent in a multipart request.

    :rtype: MultiDict
    """

    def _name_of(part):
        # The part's name lives in its Content-Disposition parameters.
        _, disposition = parse_content_disposition(
            part.headers.get(CONTENT_DISPOSITION))
        return disposition.get("name")

    def _payload_of(part):
        # Text parts are returned decoded; file parts are returned raw.
        if part.filename is None:
            return part.text()
        return part.read(decode=True)

    params = MultiDict()
    async for part in await request.multipart():
        params.add(_name_of(part), await _payload_of(part))

    return params
Beispiel #22
0
 def info_data(self, request, **params):
     """Assemble a JSON-serialisable summary of the incoming request."""
     headers = self.getheaders(request)
     data = {'method': request.method,
             'headers': headers,
             'pulsar': self.pulsar_info(request)}
     if request.method in ENCODE_URL_METHODS:
         data['args'] = as_dict(request.url_data)
     else:
         args, files = request.data_and_files()
         jfiles = MultiDict()
         if files:
             for name, part in files.items():
                 # Parts that are not valid unicode fall back to base64.
                 try:
                     content = part.string()
                 except UnicodeError:
                     content = part.base64()
                 jfiles.add(name, content)
         data['args'] = as_dict(args)
         data['files'] = as_dict(jfiles)
     data.update(params)
     return data
Beispiel #23
0
    def links(self):
        """Parse the ``Link`` response headers into a read-only multidict.

        Each entry is keyed by the link's ``rel`` parameter (falling back to
        its URL) and maps to a read-only multidict of the link's parameters
        plus a resolved ``url`` entry.
        """
        links_str = ", ".join(self.headers.getall("link", []))

        links = MultiDict()

        if not links_str:
            return MultiDictProxy(links)

        for val in re.split(r",(?=\s*<)", links_str):
            match = re.match(r"\s*<(.*)>(.*)", val)
            if match is None:
                # FIX: malformed entries previously crashed with
                # AttributeError on ``None.groups()``; skip them instead.
                continue
            url, params = match.groups()
            params = params.split(";")[1:]

            link = MultiDict()

            for param in params:
                param_match = re.match(
                    r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                    param, re.M
                )
                if param_match is None:
                    continue
                key, _, value, _ = param_match.groups()

                link.add(key, value)

            key = link.get("rel", url)

            link.add("url", self.url.join(URL(url)))

            links.add(key, MultiDictProxy(link))

        return MultiDictProxy(links)
Beispiel #24
0
    def links(self):
        """Parse the ``Link`` response headers into a read-only multidict.

        Each entry is keyed by the link's ``rel`` parameter (falling back to
        its URL) and maps to a read-only multidict of the link's parameters
        plus a resolved ``url`` entry.
        """
        links_str = ", ".join(self.headers.getall("link", []))

        links = MultiDict()

        if not links_str:
            return MultiDictProxy(links)

        for val in re.split(r",(?=\s*<)", links_str):
            match = re.match(r"\s*<(.*)>(.*)", val)
            if match is None:
                # FIX: malformed entries previously crashed with
                # AttributeError on ``None.groups()``; skip them instead.
                continue
            url, params = match.groups()
            params = params.split(";")[1:]

            link = MultiDict()

            for param in params:
                param_match = re.match(
                    r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                    param, re.M
                )
                if param_match is None:
                    continue
                key, _, value, _ = param_match.groups()

                link.add(key, value)

            key = link.get("rel", url)

            link.add("url", self.url.join(URL(url)))

            links.add(key, MultiDictProxy(link))

        return MultiDictProxy(links)
def parse_query_string(request, allow_none=False):
    """We are not using self.request.form (parsed by Zope Publisher)!!
    There is special meaning for colon(:) in key field. For example `field_name:list`
    treats data as List and it doesn't recognize FHIR search modifier like :not, :missing
    as a result, from colon(:) all chars are ommited.

    Another important reason, FHIR search supports duplicate keys (defferent values) in query string.

    Build Duplicate Key Query String ::
        >>> import requests
        >>> params = {'patient': 'P001', 'lastUpdated': ['2018-01-01', 'lt2018-09-10']}
        >>> requests.get(url, params=params)
        >>> REQUEST['QUERY_STRING']
        'patient=P001&lastUpdated=2018-01-01&lastUpdated=lt2018-09-10'

        >>> from six.moves.urllib.parse import urlencode
        >>> params = [('patient', 'P001'), ('lastUpdated', '2018-01-01'), ('lastUpdated', 'lt2018-09-10')]
        >>> urlencode(params)
        'patient=P001&lastUpdated=2018-01-01&lastUpdated=lt2018-09-10'


    param:request
    param:allow_none
    """
    query_string = request.get("QUERY_STRING", "")
    params = MultiDict()

    for q in query_string.split("&"):
        if not q:
            # FIX: skip empty segments (empty query, leading/trailing or
            # doubled '&') instead of emitting a bogus '' key.
            continue
        # FIX: split on the first '=' only so values that themselves
        # contain '=' (e.g. base64 payloads) are not truncated.
        parts = q.split("=", 1)
        param_name = unquote_plus(parts[0])
        try:
            value = parts[1] and unquote_plus(parts[1]) or None
        except IndexError:
            if not allow_none:
                continue
            value = None

        params.add(param_name, value)

    return MultiDictProxy(params)
Beispiel #26
0
    def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':
        """Parse the ``Link`` response headers into a read-only multidict.

        Each entry is keyed by the link's ``rel`` parameter (or its URL when
        no ``rel`` is present) and maps to a read-only multidict of the
        link's parameters plus a resolved ``url`` entry.
        """
        header_value = ", ".join(self.headers.getall("link", []))

        if not header_value:
            return MultiDictProxy(MultiDict())

        links = MultiDict()  # type: MultiDict[MultiDictProxy[Union[str, URL]]]

        for entry in re.split(r",(?=\s*<)", header_value):
            entry_match = re.match(r"\s*<(.*)>(.*)", entry)
            if entry_match is None:  # pragma: no cover
                # the check exists to suppress mypy error
                continue
            url, params_str = entry_match.groups()

            link = MultiDict()  # type: MultiDict[Union[str, URL]]

            # Everything after the first ';' is a `key=value` parameter.
            for param in params_str.split(";")[1:]:
                param_match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                                       param, re.M)
                if param_match is None:  # pragma: no cover
                    # the check exists to suppress mypy error
                    continue
                name, _, value, _ = param_match.groups()

                link.add(name, value)

            key = link.get("rel", url)  # type: ignore

            link.add("url", self.url.join(URL(url)))

            links.add(key, MultiDictProxy(link))

        return MultiDictProxy(links)
Beispiel #27
0
    async def read_cache(
            self,
            start: str = None,
            end: str = None) -> AsyncGenerator[models.Reading, None]:
        """Get a window of cached device readings.

        Args:
            start (str): An RFC3339 formatted timestamp giving the starting
                bound on the cache data; no starting bound when omitted.
            end (str): An RFC3339 formatted timestamp giving the ending
                bound on the cache data; no ending bound when omitted.

        Yields:
            The Synse v3 API read cache response.

        See Also:
            https://synse.readthedocs.io/en/latest/server/api.v3/#read-cache
        """

        # Only include the bounds that were actually provided.
        params = MultiDict()
        for name, value in (('start', start), ('end', end)):
            if value:
                params.add(name, value)

        stream = self.stream_request(
            url=f'{self.url}/readcache',
            method=GET,
            params=params,
        )

        async for item in stream:
            yield models.make_response(
                models.Reading,
                item,
            )
Beispiel #28
0
def parse_mimetype(mimetype: str) -> MimeType:
    """Parses a MIME type into its components.

    mimetype is a MIME type string.

    Returns a MimeType object.

    Example:

    >>> parse_mimetype('text/html; charset=utf-8')
    MimeType(type='text', subtype='html', suffix='',
             parameters={'charset': 'utf-8'})

    """
    if not mimetype:
        # Empty input maps to an all-empty MimeType.
        return MimeType(type='', subtype='', suffix='',
                        parameters=MultiDictProxy(MultiDict()))

    head, *rest = mimetype.split(';')
    params = MultiDict()  # type: MultiDict[str]
    for chunk in rest:
        if not chunk:
            continue
        if '=' in chunk:
            name, value = chunk.split('=', 1)
        else:
            name, value = chunk, ''
        params.add(name.lower().strip(), value.strip(' "'))

    fulltype = head.strip().lower()
    if fulltype == '*':
        # Bare '*' is shorthand for the full wildcard media type.
        fulltype = '*/*'

    if '/' in fulltype:
        mtype, stype = fulltype.split('/', 1)
    else:
        mtype, stype = fulltype, ''
    if '+' in stype:
        stype, suffix = stype.split('+', 1)
    else:
        suffix = ''

    return MimeType(type=mtype, subtype=stype, suffix=suffix,
                    parameters=MultiDictProxy(params))
Beispiel #29
0
async def files_ls_handler(request, ipfs_url, django_url):
    """
    A handler for MFS ls. Here we need to handle the special case where
    `ipfs files ls` is called without an arg. This is implicitly assumed to be '/'
    by go-ipfs, so we inject `arg=/` explicitly before rewriting paths.

    Returns a 500 JSON error response if any path does not start with a
    leading slash; otherwise proxies the (possibly rewritten) request.
    """
    new_query = None
    if 'arg' not in request.query:
        # Copy the original query (the MultiDict constructor preserves
        # duplicate keys) and inject the implicit root path.
        new_query = MultiDict(request.query)
        new_query.add('arg', '/')

    try:
        new_query = _rewrite_files_paths(request, new_query)
    except ValueError:
        error_msg = {
            'Message': 'paths must start with a leading slash',
            'Code': 0,
            'Type': 'error'
        }
        return web.json_response(error_msg, status=500)
    return await ipfs_proxy_handler(request, ipfs_url, query=new_query)
Beispiel #30
0
def test_aiohttp_default_params_converter(params):
    """The aiohttp converter should yield a MultiDict with repeated keys
    for list- and tuple-valued parameters."""
    converter = AiohttpParamsConverter()

    expected = MultiDict({
        "a": 1,
        "datetime": "2020-01-01T12:00:05",
    })
    # Sequence-valued params expand into one entry per element.
    for name, values in (("list", (1, 2, 3)), ("tuple", (3, 4, 5))):
        for value in values:
            expected.add(name, value)

    result = converter.convert_query_params(params)
    assert isinstance(result, MultiDict)
    assert result == expected
Beispiel #31
0
    async def read(
        self,
        ns: str = None,
        tags: Union[str, Sequence[str], Sequence[Sequence[str]]] = None,
    ) -> List[models.Reading]:
        """Get the latest reading(s) for all devices matching the selector(s).

        Args:
            ns: Default namespace applied to any tags specified without a
                namespace. (default: default)
            tags: Tag selector(s) to filter devices on. A single string or
                a flat collection of strings is treated as one tag group;
                a collection of collections of strings is treated as
                multiple tag groups.

        Returns:
            The Synse v3 API read response.

        See Also:
            https://synse.readthedocs.io/en/latest/server/api.v3/#read
        """

        query = MultiDict()
        utils.tag_params(tags, query)
        if ns:
            query.add('ns', ns)

        data = await self.make_request(
            url=f'{self.url}/read',
            method=GET,
            params=query,
        )
        return models.make_response(models.Reading, data)
Beispiel #32
0
    async def scan(
        self,
        force: bool = None,
        ns: str = None,
        sort: str = None,
        tags: Union[str, Sequence[str], Sequence[Sequence[str]]] = None,
    ) -> List[models.DeviceSummary]:
        """Get a summary of all devices currently exposed by Synse Server.

        Args:
            force (bool): When True, bypass the internal device cache and
                force a re-scan. The request takes longer since the cache
                is rebuilt, but the returned device set is up-to-date.
            ns (str): Default namespace applied to any tags specified
                without a namespace. (default: default)
            sort (str): Comma-separated fields to sort by, e.g. "plugin,id".
                The "tags" field can not be used for sorting. (default:
                "plugin,sort_index,id", where sort_index is an internal
                sort preference a plugin can optionally specify.)
            tags: Tag selector(s) to filter devices on. A single string or
                a flat collection of strings is treated as one tag group;
                a collection of collections of strings is treated as
                multiple tag groups.

        Returns:
            The Synse v3 API scan response.

        See Also:
            https://synse.readthedocs.io/en/latest/server/api.v3/#scan
        """

        query = MultiDict()
        utils.tag_params(tags, query)

        # Only include the optional query parameters which were specified.
        optional = (
            ('ns', ns),
            ('force', str(force) if force else None),
            ('sort', sort),
        )
        for name, value in optional:
            if value:
                query.add(name, value)

        data = await self.make_request(
            url=f'{self.url}/scan',
            method=GET,
            params=query,
        )
        return models.make_response(models.DeviceSummary, data)
Beispiel #33
0
def tag_params(
    tags: Union[None, str, Union[List[str], Tuple[str]],
                Sequence[Union[List[str], Tuple[str]]]],
    params: MultiDict,
) -> MultiDict:
    """Generate tag query parameters for a request.

    If no tags are specified, nothing is added to the query params MultiDict.

    Tags may be specified in a number of ways:
    - A single tag (single tag group)
    - A collection of tags (single tag group)
    - A collection of collections of tags (multiple tag groups)

    A single tag group is used to filter devices which match all of the
    tags in the group. If multiple tag groups are specified, the end result
    will be the set union of the results of the individual tag group filters.

    Args:
        tags: The tags to process into query parameters.
        params: The MultiDict which holds the query parameters for the request.

    Returns:
        The 'params' MultiDict which was provided as a parameter.

    Raises:
        ValueError: The incoming tags are specified in an unsupported format.
    """

    # Covers both None and empty collections, so any non-empty Sequence
    # below is guaranteed to have a first element.
    if not tags:
        return params

    if isinstance(tags, str):
        # A single tag string: one tag group.
        params.add('tags', tags)
        return params

    if isinstance(tags, Sequence):
        first = tags[0]
        # Check against concrete builtin types: isinstance() with the
        # typing.List/typing.Tuple aliases is deprecated and unnecessary.
        if isinstance(first, (list, tuple)):
            # Multiple tag groups: one 'tags' param per group.
            for group in tags:
                params.add('tags', ','.join(group))
            return params

        if isinstance(first, str):
            # A flat collection of tags: one combined tag group.
            params.add('tags', ','.join(tags))
            return params

    raise ValueError(
        f'Unable to process tag params: tags must be either str, Sequence[str], '
        f'or Sequence[Sequence[str]], but was {type(tags)}')
Beispiel #34
0
    def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':
        """Parse the response ``Link`` headers into a nested multidict.

        The outer multidict is keyed by each link's ``rel`` parameter
        (falling back to the raw URL when ``rel`` is absent); each value
        is an immutable multidict of the link's parameters plus a ``url``
        entry holding the target resolved against the response URL.
        Returns an empty proxy when no ``Link`` headers are present.
        """
        links_str = ", ".join(self.headers.getall("link", []))

        if not links_str:
            return MultiDictProxy(MultiDict())

        links = MultiDict()  # type: MultiDict[MultiDictProxy[Union[str, URL]]]

        # Links are comma-separated, but commas may also appear inside a
        # link's parameters; only split on commas followed by a new "<url>".
        for val in re.split(r",(?=\s*<)", links_str):
            match = re.match(r"\s*<(.*)>(.*)", val)
            if match is None:  # pragma: no cover
                # the check exists to suppress mypy error
                continue
            url, params_str = match.groups()
            # Drop the leading "<url>" segment; the rest are `;`-separated params.
            params = params_str.split(";")[1:]

            link = MultiDict()  # type: MultiDict[Union[str, URL]]

            # Each param is `key=value`, with optional single/double quoting
            # around the value (the \2 backreference matches the same quote).
            for param in params:
                match = re.match(
                    r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                    param, re.M
                )
                if match is None:  # pragma: no cover
                    # the check exists to suppress mypy error
                    continue
                key, _, value, _ = match.groups()

                link.add(key, value)

            # Prefer the 'rel' param as the outer key; fall back to the URL.
            key = link.get("rel", url)  # type: ignore

            # Resolve the link target relative to the response URL.
            link.add("url", self.url.join(URL(url)))

            links.add(key, MultiDictProxy(link))

        return MultiDictProxy(links)
Beispiel #35
0
    async def post(self) -> MultiDictProxy:
        """Return POST parameters.

        Reads the request body and parses it as form data. The parsed
        result is cached on the request, so repeated awaits are cheap.

        Returns an empty (immutable) multidict when the HTTP method does
        not carry a body or when the content type is not a recognized
        form encoding (urlencoded or multipart).

        Raises:
            HTTPRequestEntityTooLarge: if the body exceeds the configured
                client max size.
        """
        if self._post is not None:
            # Already parsed once; return the cached proxy.
            return self._post
        if self._method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Not form data (e.g. JSON); callers should use read() instead.
            self._post = MultiDictProxy(MultiDict())
            return self._post

        out = MultiDict()  # type: MultiDict

        if content_type == 'multipart/form-data':
            multipart = await self.multipart()
            max_size = self._client_max_size

            field = await multipart.next()
            while field is not None:
                size = 0
                # NOTE: rebinds `content_type` to this part's own header;
                # the outer value is no longer needed at this point.
                content_type = field.headers.get(hdrs.CONTENT_TYPE)

                if field.filename:
                    # store file in temp file
                    tmp = tempfile.TemporaryFile()
                    chunk = await field.read_chunk(size=2**16)
                    while chunk:
                        chunk = field.decode(chunk)
                        tmp.write(chunk)
                        size += len(chunk)
                        if 0 < max_size < size:
                            # Close the temp file before bailing out so the
                            # descriptor is not leaked on oversized uploads.
                            tmp.close()
                            raise HTTPRequestEntityTooLarge(
                                max_size=max_size,
                                actual_size=size
                            )
                        chunk = await field.read_chunk(size=2**16)
                    tmp.seek(0)

                    ff = FileField(field.name, field.filename,
                                   cast(io.BufferedReader, tmp),
                                   content_type, field.headers)
                    out.add(field.name, ff)
                else:
                    value = await field.read(decode=True)
                    # Text parts (or parts without a declared type) are
                    # decoded using the part's charset, defaulting to UTF-8.
                    if content_type is None or \
                            content_type.startswith('text/'):
                        charset = field.get_charset(default='utf-8')
                        value = value.decode(charset)
                    out.add(field.name, value)
                    size += len(value)
                    if 0 < max_size < size:
                        raise HTTPRequestEntityTooLarge(
                            max_size=max_size,
                            actual_size=size
                        )

                field = await multipart.next()
        else:
            # urlencoded (or unspecified) body: parse the whole payload.
            data = await self.read()
            if data:
                charset = self.charset or 'utf-8'
                out.extend(
                    parse_qsl(
                        data.rstrip().decode(charset),
                        keep_blank_values=True,
                        encoding=charset))

        self._post = MultiDictProxy(out)
        return self._post
Beispiel #36
0
    async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]":
        """Return POST parameters.

        Reads the request body and parses it as form data (urlencoded or
        multipart), caching the result for subsequent calls. Methods that
        do not carry a body, and unrecognized content types, yield an
        empty immutable multidict.

        Raises:
            HTTPRequestEntityTooLarge: if the body exceeds the configured
                client max size.
            HTTPUnsupportedMediaType: if the declared charset is unknown.
            ValueError: if the multipart payload contains nested multipart
                sections.
        """
        if self._post is not None:
            # Already parsed once; return the cached proxy.
            return self._post
        if self._method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if content_type not in (
                "",
                "application/x-www-form-urlencoded",
                "multipart/form-data",
        ):
            # Not form data (e.g. JSON); callers should use read() instead.
            self._post = MultiDictProxy(MultiDict())
            return self._post

        out = MultiDict()  # type: MultiDict[Union[str, bytes, FileField]]

        if content_type == "multipart/form-data":
            multipart = await self.multipart()
            max_size = self._client_max_size

            field = await multipart.next()
            while field is not None:
                size = 0
                field_ct = field.headers.get(hdrs.CONTENT_TYPE)

                if isinstance(field, BodyPartReader):
                    assert field.name is not None

                    # Note that according to RFC 7578, the Content-Type header
                    # is optional, even for files, so we can't assume it's
                    # present.
                    # https://tools.ietf.org/html/rfc7578#section-4.4
                    if field.filename:
                        # store file in temp file
                        tmp = tempfile.TemporaryFile()
                        chunk = await field.read_chunk(size=2**16)
                        while chunk:
                            chunk = field.decode(chunk)
                            tmp.write(chunk)
                            size += len(chunk)
                            if 0 < max_size < size:
                                # Close the temp file so its descriptor is
                                # not leaked when rejecting the upload.
                                tmp.close()
                                raise HTTPRequestEntityTooLarge(
                                    max_size=max_size, actual_size=size)
                            chunk = await field.read_chunk(size=2**16)
                        tmp.seek(0)

                        if field_ct is None:
                            field_ct = "application/octet-stream"

                        ff = FileField(
                            field.name,
                            field.filename,
                            cast(io.BufferedReader, tmp),
                            field_ct,
                            field.headers,
                        )
                        out.add(field.name, ff)
                    else:
                        # deal with ordinary data
                        value = await field.read(decode=True)
                        # Text parts (or parts without a declared type) are
                        # decoded with the part's charset, defaulting to UTF-8.
                        if field_ct is None or field_ct.startswith("text/"):
                            charset = field.get_charset(default="utf-8")
                            out.add(field.name, value.decode(charset))
                        else:
                            out.add(field.name, value)
                        size += len(value)
                        if 0 < max_size < size:
                            raise HTTPRequestEntityTooLarge(max_size=max_size,
                                                            actual_size=size)
                else:
                    raise ValueError(
                        "To decode nested multipart you need "
                        "to use custom reader", )

                field = await multipart.next()
        else:
            # urlencoded (or unspecified) body: parse the whole payload.
            data = await self.read()
            if data:
                charset = self.charset or "utf-8"
                bytes_query = data.rstrip()
                try:
                    query = bytes_query.decode(charset)
                except LookupError:
                    # The client declared a charset Python does not know.
                    raise HTTPUnsupportedMediaType()
                out.extend(
                    parse_qsl(qs=query,
                              keep_blank_values=True,
                              encoding=charset))

        self._post = MultiDictProxy(out)
        return self._post
Beispiel #37
0
    def post(self):
        """Return POST parameters.

        Old-style (``yield from``) coroutine that reads the request body
        and parses it as form data, caching the result. Methods that do
        not carry a body, and unrecognized content types, yield an empty
        immutable multidict.

        Raises:
            ValueError: if the body exceeds the configured client max size.
        """
        if self._post is not None:
            # Already parsed once; return the cached proxy.
            return self._post
        if self._method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if (content_type not in ('',
                                 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            # Not form data (e.g. JSON); callers should use read() instead.
            self._post = MultiDictProxy(MultiDict())
            return self._post

        out = MultiDict()

        if content_type == 'multipart/form-data':
            multipart = yield from self.multipart()
            # Hoisted out of the field loop: the limit is loop-invariant.
            max_size = self._client_max_size

            field = yield from multipart.next()
            while field is not None:
                size = 0
                # NOTE: rebinds `content_type` to this part's own header;
                # the outer value is no longer needed at this point.
                content_type = field.headers.get(hdrs.CONTENT_TYPE)

                if field.filename:
                    # store file in temp file
                    tmp = tempfile.TemporaryFile()
                    chunk = yield from field.read_chunk(size=2**16)
                    while chunk:
                        chunk = field.decode(chunk)
                        tmp.write(chunk)
                        size += len(chunk)
                        if max_size > 0 and size > max_size:
                            # Close the temp file before bailing out so the
                            # descriptor is not leaked on oversized uploads.
                            tmp.close()
                            raise ValueError(
                                'Maximum request body size exceeded')
                        chunk = yield from field.read_chunk(size=2**16)
                    tmp.seek(0)

                    ff = FileField(field.name, field.filename,
                                   tmp, content_type, field.headers)
                    out.add(field.name, ff)
                else:
                    value = yield from field.read(decode=True)
                    # Text parts (or parts without a declared type) are
                    # decoded with the part's charset, defaulting to UTF-8.
                    if content_type is None or \
                            content_type.startswith('text/'):
                        charset = field.get_charset(default='utf-8')
                        value = value.decode(charset)
                    out.add(field.name, value)
                    size += len(value)
                    if max_size > 0 and size > max_size:
                        raise ValueError(
                            'Maximum request body size exceeded')

                field = yield from multipart.next()
        else:
            # urlencoded (or unspecified) body: parse the whole payload.
            data = yield from self.read()
            if data:
                charset = self.charset or 'utf-8'
                out.extend(
                    parse_qsl(
                        data.rstrip().decode(charset),
                        keep_blank_values=True,
                        encoding=charset))

        self._post = MultiDictProxy(out)
        return self._post