def get_current_round_cached(self):
    cached_key = "%s_current_round_object" % self.slug
    if self.current_round:
        # get_or_set() already returns the cached (or newly stored) value,
        # so the original's follow-up cache.get() round-trip is redundant.
        return cache.get_or_set(cached_key, self.current_round, None)
    return None
def get_subject(self):
    subject_value, subject_value_pk = 5000000, 0
    subjects = self.delivery.subject_set.all().order_by('pk')
    for subject in subjects:
        try:
            subject_cache_value = cache.get_or_set(
                key='subject_cache_pk_{0}'.format(subject.pk),
                value=subject.chance,
                timeout=259200,  # 60 sec * 60 min * 24 hours * 3 days
            )
        except AttributeError:
            # Fallback for cache backends that lack get_or_set().
            subject_cache_value = cache.get(
                key='subject_cache_pk_{0}'.format(subject.pk),
            )
            if not subject_cache_value:
                subject_cache_value = subject.chance
                cache.set(
                    key='subject_cache_pk_{0}'.format(subject.pk),
                    value=subject.chance,
                    timeout=259200,
                )
        if subject_cache_value < subject_value:
            subject_value, subject_value_pk = subject_cache_value, subject.pk
    subject = subjects.get(pk=subject_value_pk)
    # Bump the winner's cached weight by its chance; the original added the
    # loop's last subject_cache_value instead of the selected subject's value.
    cache.set(
        key='subject_cache_pk_{0}'.format(subject.pk),
        value=subject_value + subject.chance,
        timeout=259200,
    )
    return subject.pk, self.choice_str_in_tmpl(tmpl=subject.subject)
def map_countries_served(container_id='projectmap_container', arc=False,
                         height=None, projection='equirectangular',
                         responsive=False):
    # Pass the function itself (not its result) so the lookup only runs
    # on a cache miss.
    countries = cache.get_or_set(
        'projectmap_countries_served',
        limsfm_get_project_countries_served,
        settings.LIMS_STATS_CACHE_TIMEOUT
    )
    map_data = {}
    arc_data = []
    for c in countries:
        iso3 = c['iso3']
        map_data[iso3] = {'fillKey': 'served'}
        if iso3 != 'GBR':
            arc_data.append({'origin': iso3, 'destination': 'GBR'})
    map_json = mark_safe(json.dumps(map_data))
    arc_json = mark_safe(json.dumps(arc_data)) if arc else None
    return {
        'container_id': container_id,
        'countries': countries,
        'map_json': map_json,
        'arc_json': arc_json,
        'height': height,
        'projection': projection,
        'responsive': responsive,
    }
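# Pattern note: cache.get_or_set() only defers work when its default is a
# callable; an already-called expression runs on every request whether or
# not the key is cached. A minimal sketch of the two forms
# (expensive_lookup is a placeholder name, not from the snippet above):
def cached_eagerly():
    # Broken: expensive_lookup() executes before get_or_set() is even
    # called, so the cache saves nothing.
    return cache.get_or_set('some_key', expensive_lookup(), 300)

def cached_lazily():
    # Correct: the callable only runs on a cache miss.
    return cache.get_or_set('some_key', expensive_lookup, 300)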
def is_cooldown_finished(self):
    last_time = cache.get_or_set(self.CACHE_TIME_ID, 0)
    current_time = time.time()
    if last_time + settings.EMAIL_COOLDOWN_SECS < current_time:
        cache.set(self.CACHE_TIME_ID, current_time)
        return True
    return False
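# The read-then-set above can race: two workers may both see a stale
# last_time and both pass the cooldown check. A sketch of a lock-free
# variant relying on cache.add(), which stores the key only if it is
# absent and is atomic on most backends (same CACHE_TIME_ID and settings
# assumed; the method name is hypothetical):
def is_cooldown_finished_atomic(self):
    # Let the key expire after the cooldown, so no timestamp comparison
    # is needed; add() returns True only for the worker that set it.
    return cache.add(self.CACHE_TIME_ID, time.time(),
                     timeout=settings.EMAIL_COOLDOWN_SECS)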
def ticket_service_view(request):
    if request.method == 'POST':
        method, data = 'post', request.POST
    else:
        method, data = 'get', request.GET
    client_ip = request.META.get('REMOTE_ADDR')
    logger.info("Request from {}".format(client_ip))
    # cache.keys() is a django-redis extension, not part of Django's API.
    matched_ips = cache.keys('{}_status'.format(client_ip))
    if matched_ips:
        if cache.get('{}_status'.format(client_ip)) == 'w':
            return call_third_party(method, data)
        return JsonResponse({'detail': 'Permission Denied.'})
    # Default to '' (not []) so the comparison below is against one type.
    g_recaptcha_response = request.POST.get('g-recaptcha-response', '')
    if g_recaptcha_response != '' and check_captcha(g_recaptcha_response, client_ip):
        cache.set('{}_status'.format(client_ip), 'w')
        return call_third_party(method, data)
    unathorised_request_num = cache.get_or_set(
        '{}_unathorised_request_num'.format(client_ip), 0)
    if unathorised_request_num < settings.MAX_UNATHORISED_REQUEST_NUM:
        cache.incr('{}_unathorised_request_num'.format(client_ip))
        template = loader.get_template('ticket_template.html')
        context = {
            'site_key': settings.RECAPTCHA_SITEKEY,
            'data': data,
        }
        return HttpResponse(template.render(context, request))
    return HttpResponse('You are not allowed to access this service anymore.')
def getFeedsFromSite(site):
    """
    Takes 'site' in the form of a URL as an argument.
    Fetches the site, parses it, and collects any embedded feed links.
    """
    from urllib.parse import urlparse

    parser = feedFinder()
    sitecomponents = urlparse(site)
    # Wrap the request in a lambda so it only fires on a cache miss.
    html = cache.get_or_set(site, lambda: requests.get(site), 10600)
    parser.feed(html.text)
    result = []
    links = [link for link in parser.links if "type" in link]
    rsslist = [link for link in links if "application/rss" in link['type']]
    for link in rsslist:
        feed = link.get('href')
        feedcomponents = urlparse(feed)
        # The original tested `netloc is ("" or None)`, which can never be
        # true; a falsy netloc means the href is relative.
        if not feedcomponents.netloc:
            feed = sitecomponents.netloc + feedcomponents.path
        result.append(feed)
        logger.info("appended %s to result", feed)
    return result
async def observer_evaluate(self, message):
    """Execute observer evaluation on the worker or throttle."""
    observer_id = message['observer']
    throttle_rate = get_queryobserver_settings()['throttle_rate']
    if throttle_rate <= 0:
        await self._evaluate(observer_id)
        return

    cache_key = throttle_cache_key(observer_id)
    try:
        count = cache.incr(cache_key)
        # Ignore if delayed observer already scheduled.
        if count == 2:
            await self.channel_layer.send(
                CHANNEL_MAIN,
                {
                    'type': TYPE_POLL,
                    'observer': observer_id,
                    'interval': throttle_rate,
                },
            )
    except ValueError:
        # incr() raises ValueError when the key does not exist yet.
        count = cache.get_or_set(cache_key, default=1, timeout=throttle_rate)
        # Ignore if cache was set and increased in another thread.
        if count == 1:
            await self._evaluate(observer_id)
def get_body_raw(self):
    body_value, body_value_pk = 5000000, 0
    bodies = self.delivery.body_set.all().order_by('pk')
    for body in bodies:
        try:
            body_cache_value = cache.get_or_set(
                key='body_cache_pk_{0}'.format(body.pk),
                value=body.chance,
                timeout=259200,  # 60 sec * 60 min * 24 hours * 3 days
            )
        except AttributeError:
            # Fallback for cache backends that lack get_or_set().
            body_cache_value = cache.get(
                key='body_cache_pk_{0}'.format(body.pk),
            )
            if not body_cache_value:
                body_cache_value = body.chance
                cache.set(
                    key='body_cache_pk_{0}'.format(body.pk),
                    value=body.chance,
                    timeout=259200,
                )
        if body_cache_value < body_value:
            body_value, body_value_pk = body_cache_value, body.pk
    body = bodies.get(pk=body_value_pk)
    # Bump the winner's cached weight by its chance; the original added the
    # loop's last body_cache_value instead of the selected body's value.
    cache.set(
        key='body_cache_pk_{0}'.format(body.pk),
        value=body_value + body.chance,
        timeout=259200,
    )
    return body.html
def contains_product(self, product):
    _super = super()

    def _inner():
        return _super.contains_product(product)

    key = 'oscarbluelight.models.Range.{}-{}.contains_product.{}'.format(
        self.pk, self.cache_version, product.pk)
    return cache.get_or_set(key, _inner, self._cache_ttl)
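# The key above embeds self.cache_version, which suggests invalidation by
# version bump rather than key deletion. A minimal sketch of that idea
# (assuming cache_version is an integer field on the model; the method
# name below is hypothetical, not from the snippet):
from django.db.models import F

def bump_cache_version(self):
    # Incrementing the version changes every derived cache key, so stale
    # entries are never read again and simply age out via their TTL.
    self.cache_version = F('cache_version') + 1
    self.save(update_fields=['cache_version'])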
def calculate_results(course, force_recalculation=False):
    if course.state != "published":
        return _calculate_results_impl(course)

    cache_key = 'evap.staff.results.tools.calculate_results-{:d}'.format(course.id)
    if force_recalculation:
        cache.delete(cache_key)
    return cache.get_or_set(cache_key, partial(_calculate_results_impl, course), None)
def _cache(self) -> Dict[str, Any]:
    if self._cached_obj is None:
        self._cached_obj = cache.get_or_set(
            'settings_{}_{}'.format(self._obj.settings_namespace, self._obj.pk),
            lambda: {s.key: s.value for s in self._obj.setting_objects.all()},
            timeout=1800
        )
    return self._cached_obj
def test_get_or_set_version(self):
    cache.get_or_set('brian', 1979, version=2)
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian')
    with self.assertRaisesMessage(ValueError, 'You need to specify a value.'):
        cache.get_or_set('brian', version=1)
    self.assertIsNone(cache.get('brian', version=1))
    self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
    self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
    self.assertIsNone(cache.get('brian', version=3))
def get_version_info():
    response = {
        'latest_major': True,
        'latest_minor': True,
        'latest_patch': True,
        'current_version': __version__,
    }
    try:
        tags = cache.get_or_set('github_release_tags', get_github_tags, TAG_CACHE_TIME)
        current_ver = semver.Version.coerce(__version__)

        # Start all three at the current version. If the server only has
        # earlier or equal releases, these remain the reported
        # major/minor/patch versions.
        latest_major = current_ver
        latest_minor = current_ver
        latest_patch = current_ver

        response.update({
            'latest_major_version': str(latest_major),
            'latest_minor_version': str(latest_minor),
            'latest_patch_version': str(latest_patch),
        })

        for tag in tags:
            tag_name = tag.get('tag_name')
            if tag_name[0] == 'v':
                # Strip 'v' off the front of the version if it exists.
                tag_name = tag_name[1:]
            try:
                tag_ver = semver.Version.coerce(tag_name)
            except ValueError:
                tag_ver = semver.Version('0.0.0', partial=True)

            if tag_ver > current_ver:
                if latest_major is None or tag_ver > latest_major:
                    latest_major = tag_ver
                    response['latest_major_version'] = tag_name
                    response['latest_major_url'] = tag['html_url']
                if tag_ver.major > current_ver.major:
                    response['latest_major'] = False
                elif tag_ver.major == current_ver.major:
                    if latest_minor is None or tag_ver > latest_minor:
                        latest_minor = tag_ver
                        response['latest_minor_version'] = tag_name
                        response['latest_minor_url'] = tag['html_url']
                    if tag_ver.minor > current_ver.minor:
                        response['latest_minor'] = False
                    elif tag_ver.minor == current_ver.minor:
                        if latest_patch is None or tag_ver > latest_patch:
                            latest_patch = tag_ver
                            response['latest_patch_version'] = tag_name
                            response['latest_patch_url'] = tag['html_url']
                        if tag_ver.patch > current_ver.patch:
                            response['latest_patch'] = False
    except requests.RequestException:
        logger.exception('Error while getting github release tags')
    return response
def countries_served():
    try:
        # Pass the function itself so the lookup only runs on a cache miss.
        return cache.get_or_set(
            'projectmap_countries_served',
            limsfm_get_project_countries_served,
            settings.LIMS_STATS_CACHE_TIMEOUT
        )
    except RequestException:
        return None
def __get_object(self, obj_class, obj_id, new=False):
    cache_key_name = self.format_cache_key_name(obj_class, obj_id)
    method = getattr(self.provider, 'get_%s' % obj_class.__name__.lower())
    if new:
        obj = method(obj_id)
        cache.set(cache_key_name, obj, OBJ_CACHE_DURATION)
    else:
        # Defer the provider call to a cache miss.
        obj = cache.get_or_set(cache_key_name, lambda: method(obj_id),
                               OBJ_CACHE_DURATION)
    obj.provider = self
    return obj
def get_channel_id_token(self):
    key = "{prefix}_channel_id_token_{key}".format(
        prefix=self.CHANNEL_TOKEN_CACHE_KEY_PREFIX, key=self.key
    )
    return cache.get_or_set(
        key,
        self.channel.get_channel_id_token,
        self.CHANNEL_TOKEN_CACHE_TIMEOUT,
    )
def __group_id_to_name(g_id):
    def get_group_name():
        groups = DiscourseManager._get_groups()
        for g in groups:
            if g['id'] == g_id:
                return g['name']
        raise KeyError("Group ID %s not found on Discourse" % g_id)

    return cache.get_or_set(DiscourseManager._generate_cache_group_id_key(g_id),
                            get_group_name, GROUP_CACHE_MAX_AGE)
def _group_name_to_id(name):
    name = DiscordOAuthManager._sanitize_groupname(name)

    def get_or_make_role():
        groups = DiscordOAuthManager._get_groups()
        for g in groups:
            if g['name'] == name:
                return g['id']
        return DiscordOAuthManager._create_group(name)['id']

    return cache.get_or_set(DiscordOAuthManager._generate_cache_role_key(name),
                            get_or_make_role, GROUP_CACHE_MAX_AGE)
def get_rate(request):
    # TODO: get the actual rate
    timestamp = math.floor(time.time())  # currently unused
    # Seed the counter (1 s TTL), then bump it atomically; get_or_set()
    # already returns the counter, so its result needs no assignment here.
    cache.get_or_set('rate', 0, 1)
    rate = cache.incr('rate')
    response = JsonResponse({"rate": rate})
    response['X-Http-Rate'] = rate
    return response
def get_themes(self):
    try:
        data = cache.get_or_set(
            'BOOTSWATCH_API_DATA',
            lambda: requests.get(BOOTSWATCH_API).json(),
            3600
        )
        return data['themes']
    except Exception as err:
        logger.exception(err)
        return []
def get_date_modified(self):
    key = "{prefix}_{key}".format(
        prefix=self.CHANNEL_DATE_MODIFIED_KEY_PREFIX, key=self.key
    )
    return cache.get_or_set(
        key,
        self.channel.get_date_modified,
        self.CHANNEL_DATE_MODIFIED_CACHE_TIMEOUT,
    )
def get_resource_count(self):
    key = "{prefix}_{key}".format(
        prefix=self.CHANNEL_RESOURCE_COUNT_KEY_PREFIX, key=self.key,
    )
    return cache.get_or_set(
        key,
        self.channel.get_resource_count,
        self.CHANNEL_RESOURCE_COUNT_CACHE_TIMEOUT,
    )
def get_notifications():
    response = {
        'notifications': [],
    }
    try:
        notifications = cache.get_or_set('github_notification_issues',
                                         get_github_notification_issues,
                                         NOTIFICATION_CACHE_TIME)
        # Limit notifications to those posted by repo owners and members.
        response['notifications'] += [
            n for n in notifications
            if n['author_association'] in ['OWNER', 'MEMBER']
        ][:5]
    except requests.RequestException:
        logger.exception('Error while getting github notifications')
    return response
def __group_name_to_id(name):
    name = DiscourseManager._sanitize_groupname(name)

    def get_or_create_group():
        groups = DiscourseManager._get_groups()
        for g in groups:
            if g['name'].lower() == name.lower():
                return g['id']
        return DiscourseManager._create_group(name)['id']

    return cache.get_or_set(DiscourseManager._generate_cache_group_name_key(name),
                            get_or_create_group, GROUP_CACHE_MAX_AGE)
def test_get_or_set_version(self):
    cache.get_or_set('brian', 1979, version=2)
    with pytest.raises(ValueError) as excinfo:
        cache.get_or_set('brian')
    assert 'You need to specify a value.' in str(excinfo.value)
    # The second raises block needs its own `as excinfo`; the original
    # asserted against the first block's captured exception.
    with pytest.raises(ValueError) as excinfo:
        cache.get_or_set('brian', version=1)
    assert 'You need to specify a value.' in str(excinfo.value)
    assert cache.get('brian', version=1) is None
    assert cache.get_or_set('brian', 42, version=1) == 42
    assert cache.get_or_set('brian', 1979, version=2) == 1979
    assert cache.get('brian', version=3) is None
def show_item(request, link, subsection_link, item_link):
    item = ItemModel.objects.get(link=item_link)
    try:
        in_cart = request.user.cart.cartitem_set.get(item=item).amount
    except Exception:
        in_cart = 0
    rand_items = cache.get_or_set(
        'rand_items',
        ItemModel.objects.filter(subclass=subsection_link)
                         .exclude(link=item.link)
                         .order_by('?')[:4],
        300)
    return render(request, 'show.html', {
        'item': item,
        'rand_items': rand_items,
        'section': item.section,
        'subsection': item.subclass,
        'in_cart': in_cart,
    })
def main_page(request):
    books = cache.get_or_set('books', Book.objects.all(), 10)
    genres = Genre.objects.all()
    book_genres = []
    for book in books:
        # The original appended books[0].genres.all() on every pass,
        # collecting the first book's genres repeatedly.
        book_genres.append(book.genres.all())
    return render(request, 'shop/main_page.html', {
        'books': books,
        'genres': genres,
        'auth': request.user.is_authenticated(),
    })
def index(request):
    post_list = cache.get_or_set('post_list', Post.objects.all())
    paginator = Paginator(post_list, 5)
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)
    except EmptyPage:
        posts = paginator.page(paginator.num_pages)
    context = {'posts': posts}
    return render(request, 'posts/view.html', context)
def create(cls, website, url):
    """Create a :py:mod:`feeds.model.Feed` object."""
    feed = cls(website=website, feed_url=url)
    # Defer the HTTP request to a cache miss, and hand feedparser the
    # response body rather than the raw Response object.
    feedcontent = cache.get_or_set(url, lambda: requests.get(url).text, 10600)
    parsed = feedparser.parse(feedcontent)
    feed.title = parsed.feed.get('title', '')[:200]
    feed.tagline = parsed.feed.get('subtitle', '')[:64]
    feed.copyright = parsed.feed.get('copyright', '')[:64]
    feed.author = parsed.feed.get('author', '')[:64]
    feed.logo = parsed.feed.get('logo', None)
    feed.webmaster = parsed.feed.get('webmaster', '')[:64]
    return feed
def get_head_to_head_winrates(
    lookback, offset, game_types, regions, min_rank, max_rank, archetypes
):
    lookback_window_start = str(int(lookback) + int(offset))
    lookback_window_end = offset
    query_params = (
        lookback_window_start,
        lookback_window_end,
        game_types,
        regions,
        min_rank,
        max_rank,
    )
    filter_expr = WINRATES_BY_ARCHETYPE_FILTER_TEMPLATE % query_params
    query = WINRATES_BY_ARCHETYPE_QUERY_TEMPLATE % (
        filter_expr, filter_expr, filter_expr, filter_expr, archetypes, archetypes
    )

    def gen_cache_value():
        return _generate_win_rates_by_archetype_table_from_db(query)

    # Hash the request parameters into a stable cache key.
    m = hashlib.md5()
    m.update(offset.encode("utf8"))
    m.update(lookback.encode("utf8"))
    m.update(game_types.encode("utf8"))
    m.update(regions.encode("utf8"))
    m.update(min_rank.encode("utf8"))
    m.update(max_rank.encode("utf8"))
    m.update(archetypes.encode("utf8"))
    cache_key = m.hexdigest()

    win_rates_table, archetype_frequencies, expected_winrates = cache.get_or_set(
        cache_key, gen_cache_value, timeout=10
    )
    return win_rates_table, archetype_frequencies, expected_winrates
def __call__(self, request, *args, **kwargs):
    keyname = self.__class__.__name__
    # Wrap the parent call so the feed is only rendered on a cache miss.
    return cache.get_or_set(
        keyname,
        lambda: super(RssPostFeed, self).__call__(request, *args, **kwargs),
        settings.CACHE_MEDIUM)
def test_get_or_set_callable_returning_none(self):
    self.assertIsNone(cache.get_or_set('mykey', lambda: None))
    # Previous get_or_set() doesn't store None in the cache.
    self.assertEqual(cache.get('mykey', 'default'), 'default')
def test_get_or_set_racing(self):
    with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
        # Simulate cache.add() failing to add a value. In that case, the
        # default value should be returned.
        cache_add.return_value = False
        self.assertEqual(cache.get_or_set('key', 'default'), 'default')
def zhihu_spider_friend(username, friend):
    spider = ZhihuSpider()
    lab = Preprocess('', 1000)
    svm_model = svm_load_model(settings.BASE_DIR + '/closends/svm/svm.model')
    start_time = datetime.now() + timedelta(days=-delta_days)
    start_time = start_time.strftime('%Y-%m-%d %H:%M:%S')
    time_2 = time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M:%S'))
    time_1 = time.time()
    try:
        zhihus = spider.scrape_user_activities(friend['zhihu_ID'], before=time_1,
                                               after=time_2, number=scrape_num)
    except Exception:
        with codecs.open('zhihu_spider_error.txt', 'a', encoding='utf8') as fw:
            current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            time_1 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_1))
            time_2 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_2))
            fw.write(current_time + '\t' + username + '\t' + friend['zhihu_ID'] + '\t' +
                     str(time_1) + '\t' + str(time_2) + '\n')
        return
    for zhihu in zhihus:
        zhihu = zhihu.convert_format()
        try:
            vector = lab.text_preprocess(zhihu['target_content'])
            p_label, _, _ = svm_predict([0], [vector], svm_model)
            content = ZhihuContent(
                pub_date=zhihu['pub_date'],
                action_type=zhihu['action_type'],
                target_user_name=zhihu['target_user_name'],
                target_user_head=zhihu['target_user_head'],
                target_user_url=zhihu['target_user_url'],
                target_user_headline=zhihu['target_user_headline'],
                target_title=zhihu['target_title'],
                target_title_url=zhihu['target_title_url'],
                target_content=zhihu['target_content'],
                target_content_url=zhihu['target_content_url'],
                topic=topic_name[int(p_label[0]) - 1],
                friend_id=friend['id'])
            if zhihu['cover_image']:
                content.cover_image = zhihu['cover_image']
            content.save()
        except Exception:
            pass
    updating_list = cache.get('zhihu_updating_list')
    updating_list.remove(friend['id'])
    cache.set('zhihu_updating_list', updating_list, None)
    updated_list = cache.get_or_set('updated_list', set())
    updated_list.add(username)
    cache.set('updated_list', updated_list, 5 * 60)
    print("Finished scraping; first pass fetched " + str(len(zhihus)) + " Zhihu activities!")
def get_interfaces_list(self, node=settings.PROXMOX_NODE_NAME, type='bridge'):
    # Defer the Proxmox API call to a cache miss.
    return cache.get_or_set('network_bridges_{node}'.format(node=node),
                            lambda: self.nodes(node).network.get(type=type), 20)
def wrapper(*args, **kwargs):
    def wrapped_func():
        return func(*args, **kwargs)

    key = memoize_key(prefix, *args, **kwargs)
    return cache.get_or_set(key, wrapped_func, timeout=DEFAULT_TIMEOUT)
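# The wrapper above is the inner function of a cache-backed memoization
# decorator. A minimal sketch of the enclosing pieces (memoized is a
# hypothetical name; memoize_key and DEFAULT_TIMEOUT are assumed to come
# from the surrounding module):
import functools

def memoized(prefix):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            def wrapped_func():
                return func(*args, **kwargs)
            key = memoize_key(prefix, *args, **kwargs)
            return cache.get_or_set(key, wrapped_func, timeout=DEFAULT_TIMEOUT)
        return wrapper
    return decorator

# Usage sketch:
# @memoized('reports')
# def expensive_report(year): ...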
def post(self, request, *args, **kwargs):
    cache.delete('music')
    cache.get_or_set('music', False, 60 * 60)
    print(cache.get('music'))
    return Response({})
def mvs(request):
    movies_all = cache.get_or_set('movies_all', Movie.objects.all())
    mvs_serialized = serializers.serialize('json', movies_all)
    # serialize() already returns a JSON string; JsonResponse(..., safe=False)
    # would encode it a second time, so send it through as-is.
    return HttpResponse(mvs_serialized, content_type='application/json')
def get_user_by_name(username):
    return cache.get_or_set(f'user_{username}', lambda: _get_user(username), 5 * 60)
def movies(request):
    movies_all = cache.get_or_set('movies_all', Movie.objects.all())
    serializer = MovieSerializer(movies_all, many=True)
    return Response(serializer.data)
def weibo_spider_friend(username, friend):
    spider = WeiboSpider()
    lab = Preprocess('', 1000)
    svm_model = svm_load_model(settings.BASE_DIR + '/closends/svm/svm.model')
    start_time = datetime.now() + timedelta(days=-delta_days)
    start_time = start_time.strftime('%Y-%m-%d %H:%M:%S')
    time_2 = time.mktime(time.strptime(start_time, '%Y-%m-%d %H:%M:%S'))
    time_1 = time.time()
    try:
        weibos = spider.scrape_user_weibo(int(friend['weibo_ID']), before=time_1,
                                          after=time_2, number=scrape_num)
    except Exception:
        with codecs.open('weibo_spider_error.txt', 'a', encoding='utf8') as fw:
            current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
            time_1 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_1))
            time_2 = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time_2))
            fw.write(current_time + '\t' + username + '\t' + friend['weibo_ID'] + '\t' +
                     str(time_1) + '\t' + str(time_2) + '\n')
        return
    for weibo in weibos:
        weibo = weibo.convert_format()
        try:
            if len(weibo) == 6:
                # Original (non-repost) weibo.
                has_image = len(weibo['images']) > 0
                vector = lab.text_preprocess(weibo['content'])
                p_label, _, _ = svm_predict([0], [vector], svm_model)
                content = WeiboContent(
                    pub_date=weibo['pub_date'],
                    src_url=weibo['src_url'],
                    content=weibo['content'],
                    is_repost=weibo['is_repost'],
                    has_image=has_image,
                    video_image=weibo['video_image'],
                    topic=topic_name[int(p_label[0]) - 1],
                    friend_id=friend['id'])
                content.save()
                if has_image:
                    for image_url in weibo['images']:
                        Image(content_object=content, image_url=image_url).save()
            else:
                # Repost: has_image here refers to the *origin* weibo's images.
                has_image = len(weibo['origin_images']) > 0
                vector = lab.text_preprocess(weibo['content'])
                p_label, _, _ = svm_predict([0], [vector], svm_model)
                content = WeiboContent(
                    pub_date=weibo['pub_date'],
                    src_url=weibo['src_url'],
                    content=weibo['content'],
                    is_repost=weibo['is_repost'],
                    has_image=False,
                    video_image=weibo['video_image'],
                    origin_account=weibo['origin_account'],
                    origin_link=weibo['origin_link'],
                    origin_pub_date=weibo['origin_pub_date'],
                    origin_src_url=weibo['origin_src_url'],
                    origin_content=weibo['origin_content'],
                    origin_has_image=has_image,
                    origin_video_image=weibo['origin_video_image'],
                    topic=topic_name[int(p_label[0]) - 1],
                    friend_id=friend['id'])
                content.save()
                if has_image:
                    for image_url in weibo['origin_images']:
                        Image(content_object=content, image_url=image_url).save()
        except Exception:
            pass
    updating_list = cache.get('weibo_updating_list')
    updating_list.remove(friend['id'])
    cache.set('weibo_updating_list', updating_list, None)
    updated_list = cache.get_or_set('updated_list', set())
    updated_list.add(username)
    cache.set('updated_list', updated_list, 5 * 60)
    print("Finished scraping; first pass fetched " + str(len(weibos)) + " weibos!")
def wrapper(*args, **kwargs):
    if cache.get_or_set("proxmox_reachable", args[0].reachable, 15) is True:
        return func(*args, **kwargs)
    raise ProxmoxNotConnectedException
def get_vm(self, vmid: str, node: str = settings.PROXMOX_NODE_NAME):
    # Defer the API call to a cache miss. Note the key omits the node
    # (the original passed an unused node kwarg to format()), so identical
    # vmids on different nodes would collide.
    return cache.get_or_set('vm_{vmid}'.format(vmid=vmid),
                            lambda: self.nodes(node).qemu(vmid).get(), 10)
def get_vms(self, node=settings.PROXMOX_NODE_NAME):
    # Defer the API call to a cache miss; with no timeout given, the
    # backend's default timeout applies.
    return cache.get_or_set('vms_{node}'.format(node=node),
                            lambda: self.nodes(node).qemu.get())
def services(self, request, *args, **kwargs):
    results = self.model.services
    # Fake out pagination for now.
    pagination = {'results': results, 'count': len(results)}
    return Response(
        data=cache.get_or_set("resources:services", pagination))
            'model_name': opts.model_name,
            'verbose_name': opts.verbose_name,
            'icon': opts.icon,
            'icon_color': 'text-' + opts.icon_color,
            'level': opts.level,
        }
        model_names.append(meta)
    counts = list(set(i.get('level') for i in model_names))
    new_menus = []
    for i in counts:
        new_menus.append([c for c in model_names if c.get('level') == i])
    return new_menus


system_menus_key = utils.make_template_fragment_key('system.menus')
# Pass the function itself so the menu is only built on a cache miss.
system_menus = cache.get_or_set(system_menus_key, construct_menus, 360)


def get_user_config(user, mark, model):
    content_type = get_content_type_for_model(model)
    configs = Configure.objects.filter(
        creator=user, mark=mark, content_type=content_type).order_by('-pk')
    if configs.exists():
        config = configs.first().content
        try:
            return json.loads(config)
        except BaseException:
            return None
    return None
def snapshot_icons(snapshot) -> str:
    cache_key = f'{str(snapshot.id)[:12]}-{(snapshot.updated or snapshot.added).timestamp()}-snapshot-icons'

    def calc_snapshot_icons():
        from core.models import EXTRACTORS
        # start = datetime.now()

        archive_results = snapshot.archiveresult_set.filter(status="succeeded", output__isnull=False)
        link = snapshot.as_link()
        path = link.archive_path
        canon = link.canonical_outputs()
        output = ""
        output_template = '<a href="/{}/{}" class="exists-{}" title="{}">{}</a> '
        icons = {
            "singlefile": "❶",
            "wget": "🆆",
            "dom": "🅷",
            "pdf": "📄",
            "screenshot": "💻",
            "media": "📼",
            "git": "🅶",
            "archive_org": "🏛",
            "readability": "🆁",
            "mercury": "🅼",
            "warc": "📦",
        }
        exclude = ["favicon", "title", "headers", "archive_org"]
        # Missing specific entry for WARC

        extractor_outputs = defaultdict(lambda: None)
        for extractor, _ in EXTRACTORS:
            for result in archive_results:
                if result.extractor == extractor and result:
                    extractor_outputs[extractor] = result

        for extractor, _ in EXTRACTORS:
            if extractor not in exclude:
                existing = (extractor_outputs[extractor]
                            and extractor_outputs[extractor].status == 'succeeded'
                            and extractor_outputs[extractor].output)
                # Check the filesystem to see if anything is actually present
                # (too slow, needs optimization/caching):
                # if existing:
                #     existing = (Path(path) / existing)
                #     if existing.is_file():
                #         existing = True
                #     elif existing.is_dir():
                #         existing = any(existing.glob('*.*'))
                output += format_html(output_template, path, canon[f"{extractor}_path"],
                                      str(bool(existing)), extractor, icons.get(extractor, "?"))
            if extractor == "wget":
                # WARC isn't technically its own extractor, so it has to be added after wget.
                # Get from the db (faster but less truthful):
                exists = (extractor_outputs[extractor]
                          and extractor_outputs[extractor].status == 'succeeded'
                          and extractor_outputs[extractor].output)
                # Get from the filesystem (slower but more accurate):
                # exists = list((Path(path) / canon["warc_path"]).glob("*.warc.gz"))
                output += format_html(output_template, path, canon["warc_path"],
                                      str(bool(exists)), "warc", icons.get("warc", "?"))
            if extractor == "archive_org":
                # The check for archive_org is different, so it has to be handled separately.
                # Get from the db (faster):
                exists = (extractor_outputs[extractor]
                          and extractor_outputs[extractor].status == 'succeeded'
                          and extractor_outputs[extractor].output)
                # Get from the filesystem (slower):
                # target_path = Path(path) / "archive.org.txt"
                # exists = target_path.exists()
                output += '<a href="{}" class="exists-{}" title="{}">{}</a> '.format(
                    canon["archive_org_path"], str(exists), "archive_org",
                    icons.get("archive_org", "?"))

        # The original template left the closing </span> unclosed.
        result = format_html(
            '<span class="files-icons" style="font-size: 1.1em; opacity: 0.8; min-width: 240px; display: inline-block">{}</span>',
            mark_safe(output))
        # end = datetime.now()
        # print(((end - start).total_seconds() * 1000) // 1, 'ms')
        return result

    return cache.get_or_set(cache_key, calc_snapshot_icons)
def get_all_cohorts_desc():
    lazy_all_cohorts = partial(tuple, _Cohort.objects.order_by('-start'))
    return cache.get_or_set('ALL_COHORTS', lazy_all_cohorts, settings.CACHE_TTL)
def get_context_data(self, **kwargs):
    context = super(LeaderboardView, self).get_context_data(**kwargs)
    leaderboard = cache.get_or_set('leaderboard', self.calculate_leaderboard)
    context.update(leaderboard)
    return context
def test_get_or_set(self):
    self.assertIsNone(cache.get('projector'))
    self.assertEqual(cache.get_or_set('projector', 42), 42)
    self.assertEqual(cache.get('projector'), 42)
    self.assertIsNone(cache.get_or_set('null', None))
def count(cls):
    """Get a (cached) count of the total number of Child instances."""
    return cache.get_or_set(cls.cache_key_count, Child.objects.count, None)
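# A count cached with timeout=None never expires on its own, so the key
# has to be dropped when Child rows change. A minimal invalidation sketch
# using Django signals (the receiver name is hypothetical; adjust the key
# lookup to wherever cache_key_count actually lives):
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver

@receiver(post_save, sender=Child)
@receiver(post_delete, sender=Child)
def _invalidate_child_count(sender, **kwargs):
    # Deleting the key forces the next count() call to recompute it.
    cache.delete(Child.cache_key_count)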
def test_get_or_set_callable(self):
    def my_callable():
        return 'value'

    self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
    self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
def get_cached_model(model: Type[models.Model], prop: str, value: str) -> Type[models.Model]:
    # Defer the query to a cache miss instead of executing it up front.
    return cache.get_or_set(f"{model.__name__}-{prop}-{value}",
                            lambda: get_model_by_params(model, **{prop: value}), 360)
def set(cve_id: str, created: bool):
    # get_or_set() seeds an empty list; note the read-modify-write below
    # is not atomic across processes.
    result = cache.get_or_set(NotificationCache.KEY, [], None)
    result.append((cve_id, created))
    cache.set(NotificationCache.KEY, result)
def get_all(cls):
    return cache.get_or_set(cls.base_cache_key, Category.objects.all())
def get_by_slug(cls, slug):
    cache_key = cache_key_stringfiy(base_key=cls.base_cache_key,
                                    query_dict={"slug": slug})
    # .get() executes immediately; defer it so the query only runs on a miss.
    return cache.get_or_set(cache_key, lambda: Category.objects.get(slug=slug))
def my_view(request):
    posts = cache.get_or_set('posts', Post.objects.all().values('id', 'text'))
    return JsonResponse(list(posts), safe=False)
def get_data(self, **kwargs):  # pylint: disable=arguments-differ
    return cache.get_or_set(API_TIMESTAMP_KEY, time.time, None)
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    leaderboard = cache.get_or_set("leaderboard", self.calculate_leaderboard)
    context["top_earned_investors"] = leaderboard
    return context
def index(request):
    # Wrap randint in a lambda so a fresh number is only drawn on a miss.
    cached = cache.get_or_set("lucky_seconds", lambda: random.randint(0, 10000000000), 10)
    return HttpResponse(
        f"<p>Hello world!</p> <p>Your lucky number for the next ten seconds is: {cached}</p>"
    )