Example #1
    def __init__(self, url=None, params=None, decode=False, **kwargs):
        delay = kwargs.get('delay')
        mkwargs = {'CACHE_TIMEOUT': kwargs.get('cache_timeout')}
        params = params or {}

        self.r = None
        self.context = SleepyDict(delay=delay) if delay else None
        self.decode = decode
        self.def_encoding = kwargs.get('encoding', ENCODING)
        self.cache_type = kwargs.get('cache_type')
        self.timeout = kwargs.get('timeout')

        if kwargs.get('cache_threshold'):
            mkwargs['CACHE_THRESHOLD'] = kwargs['cache_threshold']

        if self.cache_type:
            # cache responses by wrapping the opener with the memoize decorator
            mkwargs['cache_type'] = self.cache_type
            opener = memoize(**mkwargs)(self.open)
        else:
            opener = self.open

        response = opener(get_abspath(url), **params)
        wrapper = StringIO if self.decode else BytesIO
        f = wrapper(response) if self.cache_type else response
        self.close = f.close
        self.read = f.read
        self.readline = f.readline

        try:
            self.seek = f.seek
        except AttributeError:
            pass
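
The context = SleepyDict(delay=delay) assignment above is a throttling trick: the dict pauses before answering lookups, which delays the eventual urlopen call. A minimal sketch of the idea (riko's actual SleepyDict may differ):

from time import sleep

class SleepyDict(dict):
    """Illustrative sketch only: a dict that waits `delay` seconds before
    answering get(), so lookups double as a rate limiter."""

    def __init__(self, *args, **kwargs):
        self.delay = kwargs.pop('delay', 0)
        super(SleepyDict, self).__init__(*args, **kwargs)

    def get(self, key, default=None):
        sleep(self.delay)
        return super(SleepyDict, self).get(key, default)
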
Example #2
File: utils.py Project: sottom/riko
    def __init__(self, url=None, params=None, decode=False, **kwargs):
        delay = kwargs.get('delay')
        params = params or {}

        self.r = None
        self.ext = None
        self.context = SleepyDict(delay=delay) if delay else None
        self.decode = decode
        self.def_encoding = kwargs.get('encoding', ENCODING)
        self.cache_type = kwargs.get('cache_type')

        # TODO: need to use sep keys for memoize and urlopen
        self.timeout = kwargs.get('timeout')

        if self.cache_type:
            memoizer = memoize(**kwargs)
            opener = memoizer(self.open)
            self.cache_type = memoizer.cache_type
            self.client_name = memoizer.client_name
        else:
            opener = self.open
            self.cache_type = self.client_name = None

        response = opener(get_abspath(url), **params)
        wrapper = StringIO if self.decode else BytesIO
        f = wrapper(response) if self.cache_type else response
        self.close = f.close
        self.read = f.read
        self.readline = f.readline

        try:
            self.seek = f.seek
        except AttributeError:
            pass
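
Both __init__ methods end by binding close, read, readline and (when available) seek from the wrapped stream onto the instance, so the object itself can be used like an open file. A self-contained sketch of that delegation pattern (FileProxy is a hypothetical name, not riko's):

from io import BytesIO, StringIO

class FileProxy(object):
    """Hypothetical stand-in showing the delegation used above."""

    def __init__(self, response, decode=False):
        wrapper = StringIO if decode else BytesIO
        f = wrapper(response)
        self.close = f.close
        self.read = f.read
        self.readline = f.readline

        try:
            self.seek = f.seek
        except AttributeError:
            pass

proxy = FileProxy(b'cached body')
print(proxy.read())  # b'cached body'
proxy.close()
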
Example #3
def parser(base, objconf, skip=False, **kwargs):
    """ Parses the pipe content

    Args:
        base (str): The base currency (exchanging from)
        objconf (obj): The pipe configuration (an Objectify instance)
        skip (bool): Don't parse the content
        kwargs (dict): Keyword arguments

    Kwargs:
        assign (str): Attribute to assign parsed content (default: exchangerate)
        stream (dict): The original item

    Returns:
        dict: The item

    Examples:
        >>> from riko import get_path
        >>> from meza.fntools import Objectify
        >>>
        >>> url = get_path('quote.json')
        >>> conf = {'url': url, 'currency': 'USD', 'sleep': 0, 'precision': 6}
        >>> item = {'content': 'GBP'}
        >>> objconf = Objectify(conf)
        >>> kwargs = {'stream': item, 'assign': 'content'}
        >>> parser(item['content'], objconf, **kwargs)
        Decimal('1.545801')
    """
    same_currency = base == objconf.currency

    if skip:
        rate = kwargs['stream']
    elif same_currency:
        rate = Decimal(1)
    elif objconf.url.startswith('http'):
        get = partial(requests.get, stream=True)
        # optionally cache the HTTP fetch (HALF_DAY is the cache timeout)
        sget = memoize(HALF_DAY)(get) if objconf.memoize else get
        r = sget(objconf.url, params=objconf.params)
        r.raw.decode_content = True
        json = next(items(r.raw, ''))
    else:
        context = SleepyDict(delay=objconf.sleep)
        url = get_abspath(objconf.url)

        try:
            with closing(urlopen(url, context=context)) as f:
                json = next(items(f, ''))
        except TypeError:
            with closing(urlopen(url)) as f:
                json = next(items(f, ''))

    if not (skip or same_currency):
        places = Decimal(10)**-objconf.precision
        rates = parse_response(json)
        rate = calc_rate(base, objconf.currency, rates, places=places)

    return rate
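
The precision handling works by building a quantization template: places = Decimal(10) ** -objconf.precision yields a Decimal with the desired number of decimal places, which calc_rate presumably uses to round the computed cross rate. A worked illustration of just that rounding step (the raw rate below is made up):

from decimal import Decimal

precision = 6
places = Decimal(10) ** -precision       # template with six decimal places
raw_rate = Decimal('1.5458014')          # made-up, unrounded cross rate
print(raw_rate.quantize(places))         # Decimal('1.545801')
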
Example #4
File: utils.py Project: tianhm/riko
def opener(url,
           memoize=False,
           delay=0,
           encoding=ENCODING,
           params=None,
           **kwargs):
    params = params or {}
    timeout = kwargs.get('timeout')
    decode = kwargs.get('decode')

    # streaming HTTP request when query params are supplied
    if url.startswith('http') and params:
        r = requests.get(url, params=params, stream=True)
        r.raw.decode_content = decode
        response = r.text if memoize else r.raw
    else:
        # local paths (and param-less URLs) go through urlopen
        req = Request(url, headers={'User-Agent': default_user_agent()})
        context = SleepyDict(delay=delay) if delay else None

        try:
            r = urlopen(req, context=context, timeout=timeout)
        except TypeError:
            # older urlopen signatures don't accept a context argument
            r = urlopen(req, timeout=timeout)
        except HTTPError as e:
            raise URLError(f'{url} returned {e.code}: {e.reason}')
        except URLError as e:
            raise URLError(f'{url}: {e.reason}')

        text = r.read() if memoize else None

        if decode:
            encoding = get_response_encoding(r, encoding)

            if text:
                response = compat.decode(text, encoding)
            else:
                response = reencode(r.fp, encoding, decode=True)
                response.r = r
        else:
            response = text or r

    content_type = get_response_content_type(r)
    return (response, content_type)
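
A usage sketch for this opener (the URL and option values are placeholders):

response, content_type = opener(
    'http://example.com/feed.json', decode=True, timeout=10)
print(content_type)
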
Example #5
def parse_rss(url, delay=0):
    context = SleepyDict(delay=delay)
    response = None

    try:
        response = urlopen(decode(url), context=context)
    except TypeError:
        try:
            response = urlopen(decode(url))
        except (ValueError, URLError):
            parsed = rssparser.parse(url)
    except (ValueError, URLError):
        parsed = rssparser.parse(url)

    if response:
        content = response.read() if speedparser else response

        try:
            parsed = rssparser.parse(content)
        finally:
            response.close()

    return parsed
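
A usage sketch for parse_rss (the feed URL is a placeholder, and rssparser is assumed to be feedparser-compatible, i.e. it returns a dict with an 'entries' list):

parsed = parse_rss('http://example.com/rss.xml', delay=1)

for entry in parsed.get('entries', []):
    print(entry.get('title'))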