Example 1
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='30'):
    try:
        if headers is None: headers = {}

        agent = cache.get(cloudflareAgent, 168)

        if 'User-Agent' not in headers: headers['User-Agent'] = agent

        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        cookie = cache.get(cloudflareCookie, 168, u, post, headers, mobile, safe, timeout)

        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)

        if result[0] == '503':
            # Cloudflare challenge: duration 0 forces a fresh agent and cookie
            agent = cache.get(cloudflareAgent, 0); headers['User-Agent'] = agent

            cookie = cache.get(cloudflareCookie, 0, u, post, headers, mobile, safe, timeout)

            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
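Several examples in this list (1, 3, 6, 7, 9, 17, 20, 27, 37, 55) share a memoizing cache.get(function, duration, *args) contract rather than a key/value one. A minimal sketch of that contract, assuming a simple in-memory store (the real addons persist results to a database table and accept an extra `table` keyword):

import time

_store = {}

def get(function, duration, *args):
    # Return the cached result of function(*args) if it is younger than
    # `duration` hours; otherwise call the function, cache the result,
    # and return it. A duration of 0 forces a refresh.
    key = '%s:%r' % (function.__name__, args)
    hit = _store.get(key)
    if hit is not None:
        value, stamp = hit
        if duration > 0 and time.time() - stamp < duration * 3600:
            return value
    value = function(*args)
    _store[key] = (value, time.time())
    return value

This is why the 503 branch above retries with cache.get(..., 0, ...): duration 0 bypasses the stored value and rebuilds the Cloudflare agent and cookie.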
Example 2
def test_cache_set_expire(mock_cache):
    redis, clock = mock_cache
    cache.set("foo", "bar", expires=60)
    assert cache.get("foo") == "bar"
    clock.set_time(datetime.datetime.now() + datetime.timedelta(seconds=61))
    redis.do_expire()
    assert cache.get("foo") == None
Example 3
def get(title, year, imdb, tvdb, season, episode, show, date, genre):
    try:
        redirect = False
        if len(season) > 3: redirect = True
        genre = [i.strip() for i in genre.split('/')]
        genre = [i for i in genre if any(x == i for x in ['Reality', 'Game Show', 'Talk Show'])]
        if len(genre) > 0: redirect = True
        blocks = ['73141']
        if tvdb in blocks: redirect = True
        if not redirect: raise Exception()
    except:
        return (season, episode)

    try:
        tvrage = cache.get(getTVrageId, 8640, imdb, tvdb, show, year)
        if tvrage is None: raise Exception()
    except:
        return (season, episode)

    try:
        result = cache.get(getTVrageEpisode, 8640, tvrage, title, date, season, episode)
        if result is None: raise Exception()
        return (result[0], result[1])
    except:
        return (season, episode)
Example 4
def geoTimeQuery(comm=None, level=None, host=settings.IMPALA[0], port=settings.IMPALA[1], geo=None, time=None):
    # The level and table names come from the cache-backed settings store,
    # overriding the `level` argument
    level = cache.get().get("graph_num_levels", "")
    nodetable = cache.get().get("table", "") + '_good_nodes'
    edgestable = cache.get().get("table", "") + '_good_graph'
    trackstable = cache.get().get("table", "") + '_tracks_comms_joined'

    query = 'select distinct comm_' + str(level) + ' from ' + trackstable + ' where '
    geoThere = False
    if geo["min_lat"] != None:
        locationquery = ' cast(intersectx as double) >= ' + geo["min_lat"].replace('"','') + ' and cast(intersectx as double) <= ' + geo["max_lat"].replace('"','') + ' and cast(intersecty as double) >= ' + geo["min_lon"].replace('"','') + ' and cast(intersecty as double) <= ' + geo["max_lon"].replace('"','')
        query = query + locationquery
        geoThere = True

    if time["min_time"] != None:
        timequery = ' dt >= ' + time["min_time"] + ' and dt <= ' + time["max_time"] 
        if geoThere:
            query = query + ' and '
        query = query + timequery
    #print query
    
    nodequery = 'select node, comm, num_members, level from ' + nodetable + ' where level = "' + str(level) + '" and comm in '
    edgequery = 'select source, target, weight, level from ' +  edgestable + ' where level = "' + str(level) + '" '

    #print nodequery

    #print edgequery
    
    with impalaopen(host + ':' + port) as client:
        qResults = client.execute(query)
        comm_string = '( ' 
        for record in qResults.data:
            comm_string = comm_string + '"' + record.strip() + '", '
        comm_string = comm_string[:-2] + ')'
        nodequery = nodequery + comm_string
        edgequery = edgequery + ' and (source_comm in ' + comm_string + ' and target_comm in ' + comm_string + ' )'
    
    with impalaopen(host + ':' + port) as client:
        qResults = client.execute(nodequery)
        mapping = {}
        for idx, record in enumerate(qResults.data):
            node, comm, num_members, level = record.split('\t')
            mapping[node] = {"index": idx, "nodename": node, "node_comm": comm, "level": level, "num_members": num_members}
    
    edges = []
    nodes = []
    with impalaopen(host + ':' + port) as client:
        qResults = client.execute(edgequery)
        for record in qResults.data:
            source,target,weight,level = record.split('\t')
            edges.append({"source":mapping[source]["index"],"sourcename":source,"target":mapping[target]["index"],"targetname":target,"weight":weight})
        for info in mapping.values():
            nodes.append({"index": info["index"], "nodename": info["nodename"], "node_comm": info["node_comm"], "level": info["level"], "num_members": info["num_members"]})
    
    response = {}
    response["gephinodes"] = nodes
    response["gephigraph"] = edges
    return response
Example 5
def test_set_multi_expires(mock_cache):
    redis, clock = mock_cache
    cache.set_multi({"foo1": "bar1", "foo2": "bar2"}, expires=60)
    assert cache.get("foo1") == "bar1"
    assert cache.get("foo2") == "bar2"
    clock.set_time(datetime.datetime.now() + datetime.timedelta(seconds=61))
    redis.do_expire()
    assert cache.get("foo1") == None
    assert cache.get("foo2") == None
Example 6
def request(url, timeout='30'):
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

        cookie = cache.get(sucuri, 168, u, timeout)
        if cookie is None:
            cookie = cache.get(sucuri, 0, u, timeout)

        result = client.request(url, cookie=cookie, timeout=timeout)
        return result
    except:
        return
Example 7
def removeDownload(url):
    try:
        # The memoizing cache doubles as a key-value store here: a huge
        # duration reads the stored download list back...
        def download(): return []
        result = cache.get(download, 600000000, table='rel_dl')
        if result == '': result = []
        result = [i for i in result if i['url'] != url]
        if result == []: result = ''

        # ...and duration 0 forces re-evaluation, overwriting the stored list
        def download(): return result
        result = cache.get(download, 0, table='rel_dl')

        control.refresh()
    except:
        control.infoDialog('You need to remove the file manually', 'Cannot remove from queue')
Example 8
def main(datestamp, sleep_interval = 1):
    'Download given a datestamp.'
    # Set the directories.
    index_dir = path('downloads', 'index', datestamp)
    image_dir = path('downloads', 'images')

    # Download the index.
    index = get('http://www.postsecret.com/', cachedir = index_dir)

    # Download the new images.
    html = fromstring(index.read())
    srcs = html.xpath('//img/@src')
    for src in srcs:
        get(src, cachedir = image_dir)
        sleep(sleep_interval)
Example 9
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='60'):
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflare, 3, u, post, headers, mobile, safe, timeout)
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)

        if 'HTTP Error 503' in result[0]:
            cookie = cache.get(cloudflare, 0, u, post, headers, mobile, safe, timeout)
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
Example 10
def fetch_webpage_text(url, use_cache=True):
    if use_cache and cache.contains(url):
        return cache.get(url)
    # cache miss, download it
    content = requests.get(url).text
    cache.put(url, content)
    return content
Example 11
def variable_init(conf_={}):
    """
   Load the persistent variable table.
  
   The variable table is composed of values that have been saved in the table
   with variable_set() as well as those explicitly specified
   in the configuration file.
  """
    # NOTE: caching the variables improves performance by 20% when serving
    # cached pages.
    cached = lib_cache.get("variables", "cache")
    if cached:
        variables = cached.data
    else:
        variables = {}
        result = lib_database.query("SELECT * FROM {variable}")
        while True:
            variable = lib_database.fetch_object(result)
            if not variable:
                break
            variables[variable.name] = php.unserialize(variable.value)
        lib_cache.set("variables", variables)
    for name, value in conf_.items():
        variables[name] = value
    return variables
Example 12
    def post(self):
        tx = self.request.get_range('x', None)
        ty = self.request.get_range('y', None)
        z = self.request.get_range('z', None)
        limit = self.request.get_range('limit', min_value=1, max_value=1000, default=1000)
        offset = self.request.get_range('offset', min_value=0, default=0)
        name = self.request.get('name', None)
        source_name = self.request.get('source', None)

        if tx is None or ty is None or z is None or name is None or source_name is None:
            self.error(400)
            return
        
        key = 'tile-%s-%s-%s-%s-%s' % (z, ty, tx, source_name, name)
        png = cache.get(key)
        if png is None:
            png = get_tile_png(tx, ty, z, name, source_name, limit, offset) 
            if png is None:
                png = PointTile.blank()
            cache.add(key, png, dumps=False)
    
        logging.info('TILE BACKEND MEMORY = %s' % runtime.memory_usage().current())
        self.response.set_status(200)
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(png)            
Example 13
def getCurrentHeatMap(*args):
    table = cache.get().get("table")
    query = "select x, y, log10(sum(value)+1) as sv from " + database + ".micro_path_intersect_counts_%s group by x, y" % table
    with impalaopen(":".join(settings.IMPALA)) as curr:
        curr.execute(query)
        fmt = lambda row: "{ location: new google.maps.LatLng(%s, %s), weight: %s }" % row
        return "[" + ",".join([fmt(row) for row in curr]) + "]"
Example 14
def LinksPage(url, iconimage, description):
	descriptions, links, seasons = cache.get(Links_regex, 72, url, table="pages")
	if len(descriptions) == 1:
		description = descriptions[0]
	
	if seasons is None:
		addDir('[COLOR red] למנויים באתר, לא דרך קודי. [/COLOR]','99',99,'',False, description)
		return
	elif seasons:
		series_num = url.split('-')[-1]
		GetSeasons(series_num, iconimage, description)
	else:
		if len(links) < 1:
			addDir('[COLOR red] לא נמצאו מקורות ניגון [/COLOR]','99',99,'',False, description)
			return
		elif len(links) > 1:
			links = SortByQuality(links)
			playingUrlsList = []
			for link in links:
				playingUrlsList.append(link[2])
			addDir('[COLOR red] בחר בניגון אוטומטי [/COLOR]','99',99,'',False, description)
			addDir('{0} - ניגון אוטומטי'.format(name), json.dumps(playingUrlsList), 7, iconimage, False, description)
			addDir('[COLOR red]  או בחר מקור לניגון, אם לא עובד נסה אחר [/COLOR]','99',99,'',False, description)
		for link in links:
			addDir("{0} - {1} - איכות {2}".format(name, link[0], link[1]),link[2],5,iconimage,False, description)
Example 15
    def gotprices(self, request):

        try:
            price = tornado.escape.json_decode(request.body)
        except:
            self.returnerror("No trip price date receaved")
            return None

        price = price["data"]
        trips = self.trips["data"]["rows"]

        for i in range(0, len(trips)):
            for j in range(0, len(price)):
                if price[j]["journeyId"] == trips[i]["id"]:
                    trips[i]["pricedata"] = price[j]
                    trips[i]["departureDate"] = self.getdate
                    trips[i]["departureLocation"] = self.getfrom
                    trips[i]["arrivalLocation"] = self.getto
                    data = Empty()
                    data.getdate = self.getdate
                    data.getfrom = self.getfrom
                    data.gettime = trips[i]["departureTime"]
                    data.getto = self.getto
                    data.gettotime = trips[i]["arrivalTime"]
                    cache.store("sj", data, trips[i])
                    break

        try:
            self.returnrequest(cache.get("sj", self))

        except:
            self.returnerror("Trip not found in search")
Example 16
def fetch_webpage(url, use_cache=True):
    key = cache.md5_key(url)
    if use_cache and cache.contains(key):
        return cache.get(key)
    content = requests.get(url).text
    cache.put(key, content)
    return content
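cache.md5_key is presumably just a digest of the URL, so arbitrarily long URLs map onto fixed-length cache keys; a sketch under that assumption:

import hashlib

def md5_key(url):
    return hashlib.md5(url.encode("utf-8")).hexdigest()

Compare Examples 10 and 24, which use the raw URL as the key.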
Example 17
def request(url, timeout='30'):
    try:
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflare, 168, u, timeout)

        result = client.request(url, cookie=cookie, timeout=timeout, output='response', error=True)

        if 'HTTP Error 503' in result[0]:
            cookie = cache.get(cloudflare, 0, u, timeout)
            result = client.request(url, cookie=cookie, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
Example 18
    def post(self):
        # Check parameters
        name = self.request.get('name', None)
        if not name:
            self.error(400)
            return
        source_name = self.request.get('source', None)

        # Check cache
        key = self.cache_key(name, source_name)
        names = cache.get(key)
        if names:
            self.response.set_status(200)
            self.response.headers['Content-Type'] = "application/json"
            self.response.out.write(names)
            return

        source = sources.get(source_name)
        if not source:
            # TODO: Get names from all sources?
            self.error(404)
            return

        # Make service request for names
        names, status_code = self.get_names(source, name)
        if status_code != 200:
            self.error(status_code)
            return

        # Update cache and send response
        cache.add(key, names)
        self.response.set_status(200)
        self.response.headers['Content-Type'] = "application/json"
        self.response.out.write(simplejson.dumps(names))
Example 19
    def get_and_cache_owner(self):
        cache_key = 'TickUser(%s)' % self.get_owner_key()
        owner = cache.get(cache_key)
        if not owner:
            owner = self.owner
            cache.set(cache_key, owner)
        return owner
Example 20
def request(url, mobile=False, timeout="30"):
    try:
        u = "%s://%s" % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflare, 168, u, mobile, timeout)

        result = client.request(url, cookie=cookie, mobile=mobile, timeout=timeout, output="response", error=True)

        if "HTTP Error 503" in result[0]:
            cookie = cache.get(cloudflare, 0, u, mobile, timeout)
            result = client.request(url, cookie=cookie, mobile=mobile, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
Example 21
def query(type=QUERY_ANIME, aid=None, **kwargs):
    """
    Query AniDB for information about the anime identified by *aid* or the
    complete list of categories.

    :param type: Either QUERY_CATEGORIES or QUERY_ANIME
    :param aid: If *type* is QUERY_ANIME, the aid of the anime
    :param kwargs: Any kwargs you want to pass to :func:`requests.get`
    :raises: ValueError if `anidb.CLIENT` or `anidb.CLIENTVERSION` are not set
    :rtype: :class:`anidb.model.Anime` or a list of
            :class:`anidb.model.Category`
    """
    if CLIENT is None or CLIENTVERSION is None:
        raise ValueError(
                "You need to assign values to both CLIENT and CLIENTVERSION")
    if type == QUERY_ANIME:
        if aid is None:
            raise TypeError("aid can't be None")
        else:
            cacheresult = cache.get(aid)
            if cacheresult is not None:
                return cacheresult
                
            #print ANIDB_URL % (CLIENT, CLIENTVERSION, "anime") + "&aid=%i" % aid

            response = \
                requests.get(ANIDB_URL % (CLIENT, CLIENTVERSION, "anime")
                        + "&aid=%i" % aid, **kwargs)
            result = _handle_response(response.content)
            cache.save(aid, result)
            return result
    elif type == QUERY_CATEGORIES:
        response = requests.get(ANIDB_URL % (CLIENT, CLIENTVERSION,
                                "categorylist"), **kwargs)
        return _handle_response(response.content)
Example 22
def get_lat_long(dbo, address, town, county, postcode, country = None):
    """
    Looks up a latitude and longitude from an address using GEOCODE_URL
    and returns them as lat,long,(first 3 chars of address)
    Returns None if no results were found.
    NB: dbo is only used for contextual reference in logging, no database
        calls are made by any of this code.
    """

    if address.strip() == "":
        return None

    try:
        # Synchronise this process to a single thread to prevent
        # abusing our geo provider and concurrent requests for the
        # same address when opening an animal with the same
        # original/brought in by owner, etc.
        lat_long_lock.acquire()

        url = ""
        if country is None: 
            country = LOCALE_COUNTRY_NAME_MAP[dbo.locale]

        if BULK_GEO_PROVIDER == "cloudmade":
            q = normalise_cloudmade(address, town, county, postcode, country)
            url = CLOUDMADE_URL.replace("{key}", BULK_GEO_PROVIDER_KEY).replace("{q}", q)
        elif BULK_GEO_PROVIDER == "nominatim":
            q = normalise_nominatim(address, town, county, postcode, country)
            url = NOMINATIM_URL.replace("{q}", q)
        else:
            al.error("unrecognised geo provider: %s" % BULK_GEO_PROVIDER, "geo.get_lat_long", dbo)
            return None

        al.debug("looking up geocode for address: %s" % q, "geo.get_lat_long", dbo)
        
        key = "nom:" + q
        if cache.available():
            v = cache.get(key)
            if v is not None:
                al.debug("cache hit for address: %s = %s" % (q, v), "geo.get_lat_long", dbo)
                return v

        jr = urllib2.urlopen(url, timeout = GEO_LOOKUP_TIMEOUT).read()
        j = json.loads(jr)

        latlon = None
        if BULK_GEO_PROVIDER == "cloudmade":
            latlon = parse_cloudmade(dbo, jr, j, q)
        elif BULK_GEO_PROVIDER == "nominatim":
            latlon = parse_nominatim(dbo, jr, j, q)

        # Cache this address/geocode response for an hour
        if cache.available() and latlon is not None:
            cache.put(key, latlon, 3600)

        return latlon

    except Exception as err:
        al.error(str(err), "geo.get_lat_long", dbo)
        return None
    finally:
        # Always release the lock acquired at the top of the function
        lat_long_lock.release()
Example 23
def getCurrentHeatMap(*args):
    table = cache.get().get("table")
    query = "select x, y, sum(value) from micro_path_intersect_counts_%s group by x, y" % table
    with impalaopen(":".join(settings.IMPALA)) as client:
        results = client.execute(query)
        data = results.get_data()
        fmt = lambda row: "{ location: new google.maps.LatLng(%s, %s), weight: %s }" % tuple(row.split("\t"))
        return "[" + ",".join([fmt(row) for row in data.split("\n")]) + "]"
Example 24
def fetch_webpage_text(url, use_cache=True):
    if use_cache and cache.contains(url):
        return cache.get(url)
    # if cache miss, download it and sleep one second to prevent too-frequent calls
    content = requests.get(url).text
    cache.put(url, content)
    time.sleep(1)
    return content
Example 25
def get_cached_response(cache_key):
    """
    Gets a service call response from the cache based on its key.
    If no entry is found, None is returned.
    """
    if not CACHE_SERVICE_RESPONSES:
        return None
    return cache.get(cache_key)
Example 26
def review(batch, remote, debug):
    if (not remote):
        logging.info("cannot review locally")
        return
    jobs = cache.get("batch/%s/jobs" % batch, remote)
    review_jobs("train", jobs['train'])
    review_jobs("validate", jobs['validate'])
    review_jobs("test", jobs['test'])
    review_jobs("report", [jobs['report']])
Example 27
def request(url, post=None, headers=None, mobile=False, safe=False, timeout='60'):
    try:
        if headers is None:
            headers = {common.Decode('ouLb26Vv1Mq74w=='): common.Decode('uN7a0qWb3Nu25w==')}
        else:
            headers[common.Decode('ouLb26Vv1Mq74w==')] = common.Decode('uN7a0qWb3Nu25w==')
        u = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
        cookie = cache.get(cloudflare, 3, u, post, {common.Decode('ouLb26Vv1Mq74w=='): common.Decode('uN7a0qWb3Nu25w==')}, mobile, safe, timeout, table='cookies')
        result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout, output='response', error=True)

        if 'HTTP Error 503' in result[0]:
            cookie = cache.get(cloudflare, 0, u, post, {common.Decode('ouLb26Vv1Mq74w=='): common.Decode('uN7a0qWb3Nu25w==')}, mobile, safe, timeout, table='cookies')
            result = client.request(url, cookie=cookie, post=post, headers=headers, mobile=mobile, safe=safe, timeout=timeout)
        else:
            result = result[1]

        return result
    except:
        return
Example 28
def report(batch, remote, debug, dependency = []):
    params = cache.get("batch/%s/params" % batch, remote)
    logging.info("running reporter instance")
    if (remote):
        k = cloud.call(reporter.report, batch, params, remote, debug, _label = "%s/report" % batch, _depends_on = dependency, _type = 'c1', _max_runtime = 30)
        logging.info("k %s" % k)
        return k
    else:
        result = reporter.report(batch, params, remote, debug)
        return result
Example 29
def interpret_batches(study, portfolio, batchList, remote):
    search = cache.get("batch/%s-%s/search" % (study, portfolio), remote)
    batchName_ = search['batch_']
    if (batchList == "base"):
        return [batchName_[0]]
    elif (batchList == "*"):
        return batchName_
    else:
        batchNum_ = util.parse_number_list(batchList)
        return [batchName_[batchNum + 1] for batchNum in batchNum_]
Example 30
def dump_key(search, batch, remote, key, xpath, showSearchValues):
    elem = util.xpath_elem(cache.get("batch/%s/%s" % (batch, key), remote), xpath)
    outStr = ""
    if (showSearchValues):
        i = search['batch_'].index(batch)
        value_ = search['value__'][i]
        outStr += batch + "," + ",".join(map(str, value_)) + ","
    else:
        outStr += batch + "\n"
    outStr += str(elem) if xpath == "excel" else pp.pformat(elem)
    return outStr
Example 31
def countGitHubIssues(url):
    try:
        import client
        import cache
        import dom_parser2
        c = cache.get(client.request, 1, url)
        r = dom_parser2.parse_dom(
            c, 'div', {
                'class':
                re.compile(
                    'table-list-header-toggle\sstates\sfloat-left\spl-\d+')
            })
        r = dom_parser2.parse_dom(r, 'a')
        r = [
            re.sub('<.+?>', '', i.content).replace('\n', '').lstrip()
            for i in r
        ]
        return 'Issues: %s - %s' % (r[0], r[1])
    except:
        return 'Issues: ? Open - ? Closed'
Example 32
def get_current_theme(shop):
    """
    Get the currently active theme object.

    :param shop: The shop to get the active theme
    :type shop: E-Commerce.core.models.Shop
    :return: Theme object or None
    :rtype: Theme
    """
    value = cache.get(get_theme_cache_key(shop))
    if value:
        set_middleware_current_theme(value)
        return value

    theme = _get_current_theme(shop)
    cache.set(get_theme_cache_key(shop), theme)
    # set this theme as the current for this thread
    set_middleware_current_theme(theme)

    return theme
Example 33
def get_best_selling_product_info(shop_ids, cutoff_days=30):
    shop_ids = sorted(map(int, shop_ids))
    cutoff_date = datetime.date.today() - datetime.timedelta(days=cutoff_days)
    cache_key = "best_sellers:%r_%s" % (shop_ids, cutoff_date)
    sales_data = cache.get(cache_key)
    if sales_data is None:
        sales_data = (
            OrderLine.objects
            .filter(
                order__shop_id__in=shop_ids,
                order__order_date__gte=to_aware(cutoff_date),
                type=OrderLineType.PRODUCT
            )
            .values("product")
            .annotate(n=Sum("quantity"))
            .order_by("-n")[:100]
            .values_list("product", "product__variation_parent_id", "n")
        )
        cache.set(cache_key, sales_data, 3 * 60 * 60)  # three hours
    return sales_data
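Examples 32 and 33 (like 42, 43, and 52 below) are the classic cache-aside pattern over a Django-style cache API: check the cache, compute on a miss, store with a TTL. A minimal generic helper, assuming django.core.cache semantics:

from django.core.cache import cache

def get_or_compute(key, compute, timeout):
    # cache.set(key, value, timeout) keeps the entry for `timeout` seconds
    value = cache.get(key)
    if value is None:
        value = compute()
        cache.set(key, value, timeout)
    return value

Example 33 would then reduce to get_or_compute(cache_key, run_sales_query, 3 * 60 * 60), with run_sales_query (a hypothetical name) standing in for the OrderLine aggregation.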
Example 34
def implements(hook_, sort=False, refresh=False):
    """
    Determine which plugins are implementing a hook.

    @param hook_
      The name of the hook (e.g. "help" or "menu").
    @param sort
      By default, plugins are ordered by weight and filename;
      setting this option
      to True orders the plugin list by plugin name.
    @param refresh
      For internal use only: Whether to force the stored list of hook
      implementations to be regenerated (such as after enabling a new plugin,
      before processing hook_enable).
    @return
      An array with the names of the plugins which are implementing this hook.
    """
    php.static(implements, 'implementations', {})
    if (refresh):
        implements.implementations = {}
    elif (not lib_bootstrap.MAINTENANCE_MODE and \
        php.empty(implements.implementations)):
        cache = lib_cache.get('hooks', 'cache_registry')
        if (cache):
            implements.implementations = cache.data
        implements.implementations = \
          lib_bootstrap.registry_get_hook_implementations_cache()
    if (not php.isset(implements.implementations, hook_)):
        implements.implementations[hook_] = []
        for plugin_ in list_():
            if (hook(plugin_, hook_)):
                implements.implementations[hook_].append(plugin_)
    lib_bootstrap.registry_cache_hook_implementations({'hook' : hook_, \
      'plugins' : implements.implementations[hook_]})
    # The explicit cast forces a copy to be made. This is needed because
    # implementations[hook] is only a reference to an element of
    # implementations and if there are nested foreaches (due to nested node
    # API calls, for example), they would both manipulate the same array's
    # references, which causes some plugins' hooks not to be called.
    # See also http://www.zend.com/zend/art/ref-count.php.
    return implements.implementations[hook_]
Example 35
def settable(*args):
    if args:
        cache.update({"table": args[0]})
    with impalaopen(":".join(settings.IMPALA)) as curr:
        query = "select level, count(distinct source) from " + database + "." + args[
            0] + "_good_graph group by level order by level desc limit 50;"
        curr.execute(query)
        graph_stat_string = ""
        num_levels = 2
        i = 0
        for line in curr:
            (level, nodes) = line
            if i == 0:
                num_levels = int(level)
            i = i + 1
            graph_stat_string = graph_stat_string + "Level: " + str(level) + ", " + str(nodes) + " nodes "

        query = "select min(dt), max(dt), min(cast(intersectx as double)), max(cast(intersectx as double)), min(cast(intersecty as double)), max(cast(intersecty as double)) from " + cache.get(
        ).get("database", "") + "." + cache.get().get(
            "table", ""
        ) + "_tracks_comms_joined where track_id != 'ship(1.0)' and track_id != 'ais(3.0)'"
        curr.execute(query)
        for line in curr:
            (mindt, maxdt, minlat, maxlat, minlon, maxlon) = line
            cache.update({
                "mindt": mindt,
                "maxdt": maxdt,
                "minlat": minlat,
                "minlon": minlon,
                "maxlat": maxlat,
                "maxlon": maxlon
            })

        cache.update({
            "graph_stat_string": graph_stat_string + "",
            "graph_num_levels": num_levels,
            "level": str(num_levels),
            "community": '-'
        })
    return "0"
Example 36
def query_cache(dbo, sql, age=60):
    """
    Runs the query given and caches the result
    for age seconds. If there's already a valid cached
    entry for the query, returns the cached result
    instead.
    If CACHE_COMMON_QUERIES is set to false, just runs the query
    without doing any caching and is equivalent to db.query()
    """
    if not CACHE_COMMON_QUERIES or not cache.available():
        return query(dbo, sql)
    cache_key = "%s:%s:%s" % (dbo.alias, dbo.database, sql.replace(" ", "_"))
    m = hashlib.md5()
    m.update(cache_key)
    cache_key = "q:%s" % m.hexdigest()
    results = cache.get(cache_key)
    if results is not None:
        return results
    results = query(dbo, sql)
    cache.put(cache_key, results, age)
    return results
Example 37
def get_data(url):
    global all_new_data

    time_to_save = int(Addon.getSetting("c_save_time"))
    #all_new_data=get_s_data(response,html_g,dp,start_time)

    # NOTE: the uncached call is redundant; the cached call on the next
    # line overwrites its result
    all_new_data = get_s_data(url)
    all_new_data = cache.get(get_s_data, time_to_save, url, table='posters')


    all_new_data = sorted(all_new_data, key=lambda x: x[10], reverse=False)

    for new_name, icon, fan, plot, year, original_name, id, rating, genere, trailer, xxx, mode in all_new_data:
        if icon == '' and fan == '':
            addNolink(new_name, 'www', 199, False, iconimage=domain_s+'pbs.twimg.com/profile_images/421736697647218688/epigBm2J.jpeg', fanart='http://www.dream-wallpaper.com/free-wallpaper/cartoon-wallpaper/spawn-wallpaper/1280x1024/free-wallpaper-24.jpg')
        else:
            addDir3(new_name, 'www', mode, icon, fan, plot, data=year, original_title=original_name, id=id, rating=rating, heb_name=new_name, show_original_year=year, isr=' ', generes=genere, trailer=trailer)
Example 38
def page_get_cache():
    """
   Retrieve the current page from the cache.
  
   Note: we do not serve cached pages when status messages are waiting (from
   a redirected form submission which was completed).
  
   @param status_only
     When set to TRUE, retrieve the status of the page cache only
     (whether it was started in this request or not).
  """
    cache = None
    if (not lib_appglobals.user.uid and \
        (php.SERVER['REQUEST_METHOD'] == 'GET' or \
        php.SERVER['REQUEST_METHOD'] == 'HEAD') and \
        php.count(drupal_set_message()) == 0):
        cache = lib_cache.get(lib_appglobals.base_root + request_uri(), \
          'cache_page')
        if (php.empty(cache)):
            ob_start()
    return cache
Example 39
    def request(self, endpoint, query = None):
        try:
            # Encode the query parameters, if there are any...
            if query is not None:
                query = '?' + urllib.urlencode(query)
            else:
                query = ''

            # Make the request
            request = self.api_url % (endpoint, query)

            # Send the request and get the response
            # Get the results from cache if available
            response = cache.get(client.request, 24, request)

            # Return the result as a dictionary
            return json.loads(response)
        except:
            pass

        return {}
Example 40
def test_get_items_from_dict_context():
    customer = factories.create_random_person()
    new_customer = factories.create_random_person()
    contact_group = factories.create_random_contact_group()
    contact_group.members.add(customer)

    context = {
        "customer": customer
    }
    items = _get_items_from_context(context)
    groups = context_cache._get_val(customer.groups.all())
    assert items["customer_groups"] == groups
    assert "customer" not in items
    # check whether items were cached
    assert cache.get("_ctx_cache:customer_%d" % customer.pk) == groups

    get_val_mock = mock.Mock(wraps=context_cache._get_val)
    with mock.patch.object(context_cache, "_get_val", new=get_val_mock):
        # get items again from the context; it shouldn't invoke _get_val again for the customer
        get_val_mock.assert_not_called()
        items = _get_items_from_context(context)
        get_val_mock.assert_not_called()

    # check whether cache is bumped after changing contact
    get_val_mock = mock.Mock(wraps=context_cache._get_val)
    with mock.patch.object(context_cache, "_get_val", new=get_val_mock):
        customer.save()
        items = _get_items_from_context(context)
        get_val_mock.assert_called()

    # check whether cache is bumped after changing members of contact group
    get_val_mock = mock.Mock(wraps=context_cache._get_val)
    with mock.patch.object(context_cache, "_get_val", new=get_val_mock):
        items = _get_items_from_context(context)
        get_val_mock.assert_not_called()

        contact_group.members.add(new_customer)

        items = _get_items_from_context(context)
        get_val_mock.assert_called()
Example 41
def check(scraper):

    try:
        disable_check = xbmcaddon.Addon('plugin.video.xxx-o-dus').getSetting(
            'dev_scrapers')

        if disable_check != 'true':
            scraperFile = xbmc.translatePath(
                os.path.join('special://home/addons/script.xxxodus.scrapers',
                             'lib/scrapers/%s.py' % scraper.lower()))
            scraperLink = 'https://raw.githubusercontent.com/xibalba10/script.xxxodus.scrapers/master/lib/scrapers/%s.py' % scraper.lower(
            )
            r = cache.get(client.request, 4, scraperLink)

            if len(r) > 1:
                with open(scraperFile, 'r') as f:
                    compfile = f.read()
                if 'import' in r:
                    if compfile == r:
                        log_utils.log(
                            '%s checked and up to date!' % scraper.title(),
                            log_utils.LOGNOTICE)
                        pass
                    else:
                        with open(scraperFile, 'w') as f:
                            f.write(r)
                        icon = xbmc.translatePath(
                            os.path.join(
                                'special://home/addons/script.xxxodus.artwork',
                                'resources/art/%s/icon.png' % scraper.lower()))
                        log_utils.log('%s updated!' % scraper.title(),
                                      log_utils.LOGNOTICE)
                        kodi.notify(msg='%s Updated.' % scraper.title(),
                                    duration=1250,
                                    sound=True,
                                    icon_path=icon)
    except Exception as e:
        log_utils.log(
            'Error checking for scraper update %s :: Error: %s' %
            (scraper.title(), str(e)), log_utils.LOGERROR)
Example 42
def get_currency_precision(currency):
    """
    Get precision by currency code.

    Precision values will be populated from the ``decimal_places``
    fields of the `Currency` objects in the database.

    :type currency: str
    :param currency: Currency code as 3-letter string (ISO-4217)

    :rtype: decimal.Decimal|None
    :return: Precision value for given currency code or None for unknown
    """
    cache_key = 'currency_precision:' + currency
    precision = cache.get(cache_key)
    if precision is None:
        currency_obj = Currency.objects.filter(code=currency).first()
        precision = (
            decimal.Decimal('0.1') ** currency_obj.decimal_places
            if currency_obj else None)
        cache.set(cache_key, precision)
    return precision
Example 43
    def display_unit(self):
        """
        Default display unit of this sales unit.

        Get a `DisplayUnit` object, which has this sales unit as its
        internal unit and is marked as a default, or if there is no
        default display unit for this sales unit, then a proxy object.
        The proxy object has the same display unit interface and mirrors
        the properties of the sales unit, such as symbol and decimals.

        :rtype: DisplayUnit
        """
        cache_key = "display_unit:sales_unit_{}_default_display_unit".format(self.pk)
        default_display_unit = cache.get(cache_key)

        if default_display_unit is None:
            default_display_unit = self.display_units.filter(default=True).first()
            # Cache 0 instead of None, since None is not a valid cache value;
            # 0 is falsy below, so it still falls through to the proxy object
            cache.set(cache_key, default_display_unit or 0)

        return default_display_unit or SalesUnitAsDisplayUnit(self)
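Examples 42 and 43 differ on one subtle point: cache.get returns None both on a miss and for a cached None, so Example 42 re-queries the database on every call for an unknown currency, while Example 43 stores a falsy sentinel (0) to mark "looked up, nothing found". A sketch of the sentinel idiom, assuming the looked-up values themselves are truthy:

def get_or_lookup(key, lookup):
    value = cache.get(key)
    if value is None:                # genuine miss
        value = lookup()             # may itself return None
        cache.set(key, value if value is not None else 0)
    return value or None             # map the 0 sentinel back to None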
Example 44
def invoke_align(reference_filename, read_filename, max_errors):
    reference_filesig = cache.file_signature(reference_filename)
    reference_filename = reference_filesig[0]
    read_filesig = cache.file_signature(read_filename)
    read_filename = read_filesig[0]

    def callback(working_dir):
        print >> sys.stderr, 'Aligning'
        #Hmm
        old_stdout = sys.stdout
        sys.stdout = open(os.path.join(working_dir, 'hits.myr'), 'wb')

        try:
            assert align.main(
                [str(max_errors), '1', reference_filename, read_filename]) == 0
        finally:
            sys.stdout.close()
            sys.stdout = old_stdout

    return os.path.join(
        cache.get(('assess', 'invoke_align1', reference_filesig, read_filesig,
                   max_errors), callback), 'hits.myr')
Example 45
async def heatmap(startDate, endDate, requestTypes=[], ncList=[]):

    filters = {
        'startDate': startDate,
        'endDate': endDate,
        'requestTypes': requestTypes,
        'ncList': ncList
    }

    key = get_pins_cache_key(filters)

    # NOTE: pins get pulled for heatmap from cache (disk) even though it was
    # just added by pin cluster function. might refactor to in-memory cache
    pins = cache.get(key)

    fields = ['latitude', 'longitude']
    if pins is None:
        pins = requests.standard_query(fields, filters, table='map')
    else:
        pins = pins[fields]

    return pins.to_numpy()
Example 46
def output_plantuml(args, puml_input):
    """Output a PlantUML diagram."""
    ext = os.path.splitext(args.output or '')[-1][1:]
    mode = {'png': 'img', 'svg': 'svg', 'uml': None, '': None}[ext]
    server = (args.plantuml
              or os.getenv('SYSL_PLANTUML', 'http://localhost:8080/plantuml'))
    if mode:

        def calc():
            puml = plantuml.PlantUML('{}/{}/'.format(server, mode))
            response = requests.get(puml.get_url(puml_input))
            response.raise_for_status()
            return response.content

        out = cache.get(mode + ':' + puml_input, calc)
    else:
        # assumption: fall back to the raw PlantUML source when no rendering
        # mode applies, since `out` is otherwise unset when written below
        out = puml_input

    useConfluence = (args.output or '').startswith('confluence://')

    if args.verbose:
        print args.output + '...' * useConfluence,
        sys.stdout.flush()

    if useConfluence:
        if confluence.upload_attachment(args.output, cStringIO.StringIO(out),
                                        args.expire_cache,
                                        args.dry_run) is None:
            if args.verbose:
                print '\033[1;30m(no change)\033[0m',
        else:
            if args.verbose:
                print '\033[1;32muploaded\033[0m',
                if args.dry_run:
                    print '... not really (dry-run)',
    else:
        (open(args.output, 'w') if args.output else sys.stdout).write(out)
        # (open(args.output + '.puml', 'w') if args.output else sys.stdout).write(puml_input)

    if args.verbose:
        print
Example 47
    def get_list(self, mode, type, url, title_pattern, url_pattern, icon_pattern=None, site=None, d_p1=None, d_p2=None, d_p3=None, parse=None, cache_time=None,searched=False,stopend=False, isVideo=False, isDownloadable = False):

        if cache_time: r = cache.get(client.request,cache_time,url)
        else: r = client.request(url)

        if 're|' in d_p3:
            d_p3 = d_p3.replace('re|','')
            r = dom_parser2.parse_dom(r, d_p1, {d_p2: re.compile('%s' % d_p3)})
        else: r = dom_parser2.parse_dom(r, d_p1, {d_p2: d_p3})

        if r:
        
            dirlst = []
            
            for i in r:
                    name = re.findall(r'%s' % title_pattern,i.content)[0]
                    name = kodi.sortX(i[1].encode('utf-8'))
                    url = re.findall(r'%s' % url_pattern,i.content)[0]
                    if icon_pattern:
                        iconimage = re.findall(r'%s' % icon_pattern,i.content)[0]
                    elif site: iconimage = xbmc.translatePath(os.path.join('special://home/addons/script.wankbank.artwork', 'resources/art/%s/icon.png' % site))
                    else: iconimage = xbmc.translatePath(os.path.join('special://home/addons/' + kodi.get_id(), 'icon.png'))
                    fanarts = xbmc.translatePath(os.path.join('special://home/addons/script.wankbank.artwork', 'resources/art/%s/fanart.jpg' % site))
                    if parse: 
                        link,tag = parse.split('|SPLIT|')
                        if tag == 'url': 
                            url = urlparse.urljoin(link,url)
                        elif tag == 'icon': 
                            iconimage = urlparse.urljoin(link,iconimage)
                        else:
                            url = urlparse.urljoin(link,url)
                            iconimage = urlparse.urljoin(link,iconimage)
                    if site: url += '|SPLIT|' + site        
                    if type == 'dir': dirlst.append({'name': kodi.giveColor(name,'white'), 'url': url, 'mode': mode, 'icon': iconimage, 'fanart': fanarts, 'description': name, 'folder': True})
                    else: dirlst.append({'name': kodi.giveColor(name,'white'), 'url': url, 'mode': mode, 'icon': iconimage, 'fanart': fanarts, 'description': name, 'folder': False})

            if dirlst: 
                if stopend: buildDirectory(dirlst, stopend=True, isVideo=isVideo, isDownloadable=isDownloadable)
                else: buildDirectory(dirlst, isVideo=isVideo, isDownloadable=isDownloadable)
Example 48
def check_comment(user_email, artwork_id, comment_text):
    cache_key = COMMENT_CACHE_PREFIX + user_email
    cache_value = cache.get(cache_key)
    result = False
    if cache_value is None:
        result = True
    else:
        if datetime.now() - cache_value[VALUE_DATE] > COMMENT_MIN_INTERVAL:
            if artwork_id != cache_value[VALUE_ARTWORK_ID]:
                result = True
            else:
                if comment_text != cache_value[VALUE_COMMENT]:
                    result = True
                    
    cache_value = {
        VALUE_DATE: datetime.now(),
        VALUE_ARTWORK_ID: artwork_id,
        VALUE_COMMENT: comment_text
        }
    cache.add(cache_key, cache_value)
                    
    return result
Example 49
def _get_cached_value_from_context(context, key, value):
    cached_value = None

    # 1) check whether the value is cached inside the context as an attribute
    try:
        cache_key = "_ctx_cache_{}".format(key)
        cached_value = getattr(context, cache_key)
    except AttributeError:
        pass

    # 2) Check whether the value is cached in the general cache;
    # we can only cache objects that have a `pk` attribute
    if cached_value is None and hasattr(value, "pk"):
        cache_key = "_ctx_cache:{}_{}".format(key, value.pk)
        cached_value = cache.get(cache_key)

    # 3) Nothing is cached, then read the value itself
    if cached_value is None:
        if key == "customer" and value:
            cached_value = _get_val(value.groups.all())
        else:
            cached_value = _get_val(value)

        # Set the value as an attribute of the context;
        # sometimes this will raise AttributeError because the
        # context is not a valid object, like a dictionary
        try:
            cache_key = "_ctx_cache_{}".format(key)
            setattr(context, cache_key, cached_value)
        except AttributeError:
            pass

        # cache the value in the general cache
        if hasattr(value, "pk"):
            cache_key = "_ctx_cache:{}_{}".format(key, value.pk)
            cache.set(cache_key, cached_value)

    return cached_value
Example 50
def beautify(text, lang, options):
    """
    Process input `text` according to the specified `mode`.
    Adds comments if needed, according to the `lang` rules.
    Caches the results.
    The whole work (except caching) is done by _beautify().
    """

    options = options or {}
    beauty_options = dict((k, v) for k, v in options.items()
                          if k in ['add_comments', 'remove_text'])

    mode = ''
    if beauty_options.get('add_comments'):
        mode += 'c'
    if beauty_options.get('remove_text'):
        mode += 'q'

    if beauty_options == {}:
        # no known transformation was requested; leave the text as is
        return text

    if isinstance(text, str):
        text = text.encode('utf-8')
    digest = "t:%s:%s:%s" % (hashlib.md5(text).hexdigest(), lang, mode)

    # temporary added line that removes invalid cache entries
    # that used wrong commenting methods
    if lang in ["git", "django", "flask", "cmake"]:
        cache.delete(digest)

    answer = cache.get(digest)
    if answer:
        return answer
    answer = _beautify(text, lang, **beauty_options)
    cache.put(digest, answer)

    return answer
Example 51
    def break_links(self):
        """ One node that connects to a link in the middle is not connected;
        therefore, break_links is used for cutting at the middle point and
        building new links that connects nodes to links.
        """

        self.logger.info("Starts to break links")
        cache_enabled = Config.params["simulation"]["cache"]

        # Loads all_nodes from cache if exists
        if cache_enabled:
            hash_key = cache.get_hash([], self.nodes)
            cached = cache.get(hash_key)
            if cached:
                self.runways, self.taxiways, self.pushback_ways = cached
                self.logger.debug("Done breaking links using cache")
                return

        # Retrieve all nodes and links
        all_nodes = deepcopy(self.break_node)

        for link in self.links:
            all_nodes.append(link.start)
            all_nodes.append(link.end)
            self.__add_break_node(link.start)
            self.__add_break_node(link.end)

        index = 0
        while index < len(all_nodes):
            index = self.__break_next_link(all_nodes, index)

        self.logger.info("Done breaking links")
        self.__get_break_nodes()

        # Stores the result into cache for future usages
        if cache_enabled:
            to_cache = [self.runways, self.taxiways, self.pushback_ways]
            cache.put(hash_key, to_cache)
Example 52
def get_products_ordered_with(prod, count=20, request=None, language=None):
    cache_key = "ordered_with:%d" % prod.pk
    product_ids = cache.get(cache_key)
    if product_ids is None:
        # XXX: could this be optimized more? (and does it matter?)
        order_ids = (
            OrderLine.objects.filter(product=prod, type=OrderLineType.PRODUCT)
            .values_list("order__id", flat=True)
        )
        product_ids = (
            OrderLine.objects
            .filter(order_id__in=order_ids)
            .exclude(product=prod)
            .distinct()
            .values_list("product", flat=True)
        )
        cache.set(cache_key, set(product_ids), 4 * 60 * 60)
    return (
        Product.objects
        .all_visible(request, language=language)
        .filter(id__in=product_ids)
        .order_by("?")[:count]
    )
Example 53
def sample(read_files, n_samples):
    read_filesigs = [cache.file_signature(filename) for filename in read_files]
    read_files = [item[0] for item in read_filesigs]

    def callback(working_dir):
        print >> sys.stderr, 'Sampling'
        samples = []
        n = 0
        for item in sequence.sequence_files_iterator(read_files):
            n += 1
            if len(samples) < n_samples:
                samples.append(item)
            elif random.random() * n < n_samples:  # replace with probability n_samples/n (the original comparison was inverted)
                samples[random.randrange(n_samples)] = item

        outfile = open(os.path.join(working_dir, 'sample.fna'), 'wb')
        for item in samples:
            print >> outfile, '>%s' % item[0]
            print >> outfile, '%s' % sequence.string_from_sequence(item[1])

    result_dir = cache.get(('assess', 'sample', n_samples, read_filesigs),
                           callback)
    return os.path.join(result_dir, 'sample.fna')
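The callback above is reservoir sampling (Algorithm R): keep the first n_samples items, then let the n-th item replace a random slot with probability n_samples/n, so every item ends up in the sample with equal probability. A self-contained version:

import random

def reservoir_sample(iterable, k):
    sample = []
    for n, item in enumerate(iterable, start=1):
        if len(sample) < k:
            sample.append(item)
        elif random.random() * n < k:
            sample[random.randrange(k)] = item
    return sample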
Example 54
def get_articles(area):
    ret = []
    for person in Site.CONTEXT.config.staff.sections():
        feed = Site.CONTEXT.config.staff.get(person, config_field)

        cache_key = os.path.join(
            Site.CONTEXT.config.cache.get("cache","cache_dir"),
            md5(feed).hexdigest())

        blog = cache.get(cache_key,
                expires=Site.CONTEXT.config.cache.get("cache","expires"))

        if not blog:
            blog = feedparser.parse(feed)
            if not blog.has_key("status") or blog["status"] != 200:
                print "warning:innovation:%s:feed %s return a non valid status. Skipping..." % (area, feed)
                continue
            cache.set(cache_key, blog)

        if len(blog.feed) == 0:
            print "warning:innovation:%s:feed %s is not available" % (
                    area, feed
            )
            continue

        # Remove appeded title for category based feeds in wordpress
        if blog.feed.title.find(u"»") != -1:
            blog.feed.title = blog.feed.title.split(u"»")[0]

        for e in blog.entries:
            if getattr(e,"tags", False):
                terms = map(lambda x: x["term"].lower(), e.tags)
                if "idea" in terms or "labs" in terms or "lab" in terms:
                    if "%s" % area in terms:
                        ret.append({"link":e.link,"title":e.title})

    return ret
Example 55
def validateUser(emailAddress=None, showRegisteration=False):
    try:
        url = None
        if emailAddress is None or emailAddress == '':
            emailAddress = control.setting('user.email')
        if (emailAddress is None or emailAddress == '') and showRegisteration:
            control.dialog.ok(
                control.addonInfo('name'),
                "[COLOR red]User not registered.[/COLOR] [CR]Please provide the email address used to make the donation"
            )
            t = control.lang(30275).encode('utf-8')
            k = control.keyboard('', t)
            k.doModal()
            emailAddress = k.getText() if k.isConfirmed() else None
        elif emailAddress is None or emailAddress == '':
            return (control.INVALID, url)

        logger.debug('Validating User : %s' % emailAddress, __name__)
        valid = cache.get(validate, 168, emailAddress, table='live_cache')
        if valid is None:
            result = client.request(INVALID_MESSAGE_URL)
            control.dialog.ok(control.addonInfo('name'), result)
            control.setSetting('user.email', '')
            return (control.INVALID, '')
        elif valid <= 0:
            return valid, ''
        url = base64.b64decode(
            'aHR0cHM6Ly9vZmZzaG9yZWdpdC5jb20vdmluZWVndS9hZnRlcnNob2NrLXJlcG8vZ3VpZGVzLw=='
        )
        return (valid, url)

    except Exception as e:
        logger.error(e)
        result = client.request(INVALID_MESSAGE_URL)
        control.dialog.ok(control.addonInfo('name'), result)
        control.setSetting('user.email', '')
        return (control.INVALID, url)
Example 56
def get_series_ru(self, chat_id, message_id, search_query_id):
    search_query = db.get_search_query(search_query_id)
    series_number = search_query.query_text
    key = f"avtonomer.get_series_ru({series_number})"

    result = cache.get(key)
    if result is None:  # `not result` would treat a cached count of 0 as a miss
        result = avtonomer.search_ru(fastsearch="{}*{}".format(
            series_number[:1],
            series_number[1:],
        ), )
        if result is not None:
            result = result.total_results

    if result is None:
        logger.warning(f"No data for query {series_number}")
        bot.send_message(
            chat_id,
            "Нет данных",
            reply_to_message_id=message_id,
        )
        return

    cache.add(key, result, timedelta(minutes=5))

    url = avtonomer.get_series_ru_url(series_number)
    series_number = avtonomer.translate_to_cyrillic(series_number)
    message = (
        f"В серии [{series_number}]({url}) пока нет ни одного номера"
        if result == 0 else
        f"Количество фотографий в серии [{series_number}]({url}): {result}")
    bot.send_message(
        chat_id,
        message,
        parse_mode="Markdown",
        reply_to_message_id=message_id,
    )
Example 57
def get_cached_value(identifier, item, context, **kwargs):
    """
    Get item from context cache by identifier

    Accepts optional kwargs parameter `allow_cache` which will skip
    fetching the actual cached object. When `allow_cache` is set to
    False only cache key for identifier, item, context combination is
    returned.

    :param identifier: Any
    :type identifier: string
    :param item: Any
    :param context: Any
    :type context: dict
    :return: Cache key and cached value if allowed
    :rtype: tuple(str, object)
    """
    allow_cache = True
    if "allow_cache" in kwargs:
        allow_cache = kwargs.pop("allow_cache")
    key = get_cache_key_for_context(identifier, item, context, **kwargs)
    if not allow_cache:
        return key, None
    return key, cache.get(key)
Example 58
def last_query(client_id):
    """
    Return the last query for the client `client_id`
    """
    return cache.get("l:%s" % client_id)
Example 59
    def get_answers(
            self,
            topic: str,
            request_options: Dict[str, str] = None) -> List[Dict[str, Any]]:
        """
        Find cheat sheets for the topic.

        Args:
            `topic` (str):    the name of the topic of the cheat sheet

        Returns:
            [answer_dict]:    list of answers (dictionaries)
        """

        # if topic specified as <topic_type>:<topic>,
        # cut <topic_type> off
        topic_type = ""
        if re.match("[^/]+:", topic):
            topic_type, topic = topic.split(":", 1)

        topic = self.handle_if_random_request(topic)
        topic_types = self.get_topic_type(topic)

        # if topic_type is specified explicitly,
        # show pages only of that type
        if topic_type and topic_type in topic_types:
            topic_types = [topic_type]

        # 'question' queries are pretty expensive, that's why they should be handled
        # in a special way:
        # we do not drop the old style cache entries and try to reuse them if possible
        if topic_types == ['question']:
            answer = cache.get('q:' + topic)
            if answer:
                if isinstance(answer, dict):
                    return [answer]
                return [{
                    'topic': topic,
                    'topic_type': 'question',
                    'answer': answer,
                    'format': 'text+code',
                }]

            answer = self._get_page_dict(topic,
                                         topic_types[0],
                                         request_options=request_options)
            if answer.get("cache", True):
                cache.put('q:' + topic, answer)
            return [answer]

        # Try to find cacheable queries in the cache.
        # If answer was not found in the cache, resolve it in a normal way and save in the cache
        answers = []
        for topic_type in topic_types:

            cache_entry_name = f"{topic_type}:{topic}"
            cache_needed = self._adapter[topic_type].is_cache_needed()

            if cache_needed:
                answer = cache.get(cache_entry_name)
                if not isinstance(answer, dict):
                    answer = None
                if answer:
                    answers.append(answer)
                    continue

            answer = self._get_page_dict(topic,
                                         topic_type,
                                         request_options=request_options)
            if isinstance(answer, dict) and "cache" in answer:
                cache_needed = answer["cache"]

            if cache_needed and answer:
                cache.put(cache_entry_name, answer)

            answers.append(answer)

        return answers
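get_answers() only touches two points of each adapter: `is_cache_needed()` and whatever `_get_page_dict()` ultimately returns. A stub of that minimal surface; the class name and the `True` default are assumptions, not the project's real adapters:

class ExampleAdapter:
    """Hypothetical adapter stub for the caching contract above."""

    def is_cache_needed(self):
        # Return False for volatile, per-request content; an answer
        # dict can still override this via its "cache" key.
        return True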
Esempio n. 60
0
def request(url, close=True, redirect=True, error=False, proxy=None, post=None, headers=None, mobile=False, XHR=False, limit=None, referer=None, cookie=None, compression=True, output='', timeout='30'):
    try:
        handlers = []

        if proxy is not None:
            handlers += [urllib2.ProxyHandler({'http': '%s' % proxy}), urllib2.HTTPHandler]
            urllib2.install_opener(urllib2.build_opener(*handlers))

        # A cookie jar is needed whenever cookies have to be reported back.
        if output in ('cookie', 'extended') or not close:
            cookies = cookielib.LWPCookieJar()
            handlers += [urllib2.HTTPHandler(), urllib2.HTTPSHandler(), urllib2.HTTPCookieProcessor(cookies)]
            urllib2.install_opener(urllib2.build_opener(*handlers))

        # Python 2.7.9 turned on certificate verification by default;
        # relax it so HTTPS scraping keeps working on those versions.
        if (2, 7, 9) <= sys.version_info < (2, 7, 11):
            try:
                import ssl; ssl_context = ssl.create_default_context()
                ssl_context.check_hostname = False
                ssl_context.verify_mode = ssl.CERT_NONE
                handlers += [urllib2.HTTPSHandler(context=ssl_context)]
                opener = urllib2.build_opener(*handlers)
                opener = urllib2.install_opener(opener)
            except:
                pass

        if url.startswith('//'): url = 'http:' + url

        # Normalise headers and fill in defaults without overwriting
        # anything supplied by the caller.
        if headers is None: headers = {}

        if 'User-Agent' not in headers:
            if mobile:
                headers['User-Agent'] = 'Apple-iPhone/701.341'
            else:
                headers['User-Agent'] = cache.get(randomagent, 1)
        if 'Referer' not in headers:
            if referer is None:
                headers['Referer'] = '%s://%s/' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)
            else:
                headers['Referer'] = referer
        if 'Accept-Language' not in headers:
            headers['Accept-Language'] = 'en-US'
        if XHR and 'X-Requested-With' not in headers:
            headers['X-Requested-With'] = 'XMLHttpRequest'
        if cookie is not None and 'Cookie' not in headers:
            headers['Cookie'] = cookie
        # Only advertise gzip when the full body will be read.
        if compression and limit is None and 'Accept-Encoding' not in headers:
            headers['Accept-Encoding'] = 'gzip'


        if not redirect:

            class NoRedirection(urllib2.HTTPErrorProcessor):
                # Return every response untouched, which disables both
                # redirect following and HTTP error raising.
                def http_response(self, request, response): return response

            urllib2.install_opener(urllib2.build_opener(NoRedirection))

            headers.pop('Referer', None)


        request = urllib2.Request(url, data=post, headers=headers)


        try:
            response = urllib2.urlopen(request, timeout=int(timeout))
        except urllib2.HTTPError as response:

            if response.code == 503:
                cf_result = response.read(5242880)
                try: encoding = response.info().getheader('Content-Encoding')
                except: encoding = None
                if encoding == 'gzip':
                    cf_result = gzip.GzipFile(fileobj=StringIO.StringIO(cf_result)).read()

                # Cloudflare browser-verification challenge: solve it once,
                # cache the clearance cookie for 168 hours and retry.
                if 'cf-browser-verification' in cf_result:

                    netloc = '%s://%s' % (urlparse.urlparse(url).scheme, urlparse.urlparse(url).netloc)

                    ua = headers['User-Agent']

                    cf = cache.get(cfcookie().get, 168, netloc, ua, timeout)

                    headers['Cookie'] = cf

                    request = urllib2.Request(url, data=post, headers=headers)

                    response = urllib2.urlopen(request, timeout=int(timeout))

                elif not error:
                    return

            elif not error:
                return


        if output == 'cookie':
            try: result = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            # Prefer the Cloudflare clearance cookie when one was obtained.
            try: result = cf
            except: pass
            if close: response.close()
            return result

        elif output == 'geturl':
            result = response.geturl()
            if close: response.close()
            return result

        elif output == 'headers':
            result = response.headers
            if close: response.close()
            return result

        elif output == 'chunk':
            # Sample the first 16 KB, but only for bodies larger than 2 MB.
            try: content = int(response.headers['Content-Length'])
            except: content = (2049 * 1024)
            if content < (2048 * 1024): return
            result = response.read(16 * 1024)
            if close: response.close()
            return result


        # limit='0' caps the body at 224 KB, any other limit at <limit> KB;
        # otherwise read at most 5 MB.
        if limit == '0':
            result = response.read(224 * 1024)
        elif limit is not None:
            result = response.read(int(limit) * 1024)
        else:
            result = response.read(5242880)

        try: encoding = response.info().getheader('Content-Encoding')
        except: encoding = None
        if encoding == 'gzip':
            result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()


        # Sucuri CloudProxy JS challenge: solve it, retry with the cookie
        # and re-read the body under the same limits as above.
        if 'sucuri_cloudproxy_js' in result:
            su = sucuri().get(result)

            headers['Cookie'] = su

            request = urllib2.Request(url, data=post, headers=headers)

            response = urllib2.urlopen(request, timeout=int(timeout))

            if limit == '0':
                result = response.read(224 * 1024)
            elif limit is not None:
                result = response.read(int(limit) * 1024)
            else:
                result = response.read(5242880)

            try: encoding = response.info().getheader('Content-Encoding')
            except: encoding = None
            if encoding == 'gzip':
                result = gzip.GzipFile(fileobj=StringIO.StringIO(result)).read()


        if output == 'extended':
            response_headers = response.headers
            response_code = str(response.code)
            try: cookie = '; '.join(['%s=%s' % (i.name, i.value) for i in cookies])
            except: pass
            try: cookie = cf
            except: pass
            if close: response.close()
            return (result, response_code, response_headers, headers, cookie)
        else:
            if close: response.close()
            return result
    except:
        return
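Typical calls, assuming the surrounding module is imported as `client` as in the first example; the `output` flag selects what comes back. The URLs are placeholders.

# Hypothetical usage (Python 2, matching the code above).
html = client.request('http://example.com/page')                     # decoded body
cookie = client.request('http://example.com/page', output='cookie')  # 'name=value; ...'
final_url = client.request('http://example.com/r', output='geturl')  # URL after redirects
body, code, resp_headers, req_headers, cookie = client.request(
    'http://example.com/page', output='extended')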