Example 1
def load_menu_settings():
    tlm, fm = get_from_cache()
    if tlm and fm:
        return tlm, fm
    with menu_lock:
        tlm, fm = get_from_cache()
        if tlm and fm:
            return tlm, fm
        tlm = set()
        fm = set()
        pages = Page.objects.filter(
            Q(menuextension__show_on_top_menu=True) |
            Q(menuextension__show_on_footer_menu=True))\
            .select_related('menuextension')\
            .all()
        for page in pages:
            if page.menuextension.show_on_top_menu:
                tlm.add(page.id)
            if page.menuextension.show_on_footer_menu:
                fm.add(page.id)
        cache.set_many({
            'top_level_menu_ids': tlm,
            'footer_menu_ids': fm,
        })
        return tlm, fm
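The snippet above calls a get_from_cache helper that is not shown. A minimal sketch of what it might look like, assuming the two ID sets live under the same keys used in the set_many call (the body is an assumption, not the original helper):

def get_from_cache():
    # Read both ID sets in one round trip; keys that are missing or expired
    # simply come back absent, so a cold cache yields (None, None).
    cached = cache.get_many(['top_level_menu_ids', 'footer_menu_ids'])
    return cached.get('top_level_menu_ids'), cached.get('footer_menu_ids')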
Example 2
    def fetch_by_id(self):
        """
        Run two queries to get objects: one for the ids, one for id__in=ids.

        After getting ids from the first query we can try cache.get_many to
        reuse objects we've already seen.  Then we fetch the remaining items
        from the db, and put those in the cache.  This prevents cache
        duplication.
        """
        # Include columns from extra since they could be used in the query's
        # order_by.
        vals = self.values_list('pk', *self.query.extra.keys())
        pks = [val[0] for val in vals]
        keys = dict((byid(self.model._cache_key(pk)), pk) for pk in pks)
        cached = dict((k, v) for k, v in cache.get_many(keys).items()
                      if v is not None)

        # Pick up the objects we missed.
        missed = [pk for key, pk in keys.items() if key not in cached]
        if missed:
            others = self.fetch_missed(missed)
            # Put the fetched objects back in cache.
            new = dict((byid(o), o) for o in others)
            cache.set_many(new)
        else:
            new = {}

        # Use pks to return the objects in the correct order.
        objects = dict((o.pk, o) for o in cached.values() + new.values())
        for pk in pks:
            yield objects[pk]
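The read-through pattern the docstring describes, reduced to a hedged, self-contained sketch with a generic key scheme (the key format is a placeholder, not the original byid/fetch_missed helpers):

def bulk_get(model, pks):
    keys = {'obj:%s:%s' % (model.__name__, pk): pk for pk in pks}
    cached = cache.get_many(keys)          # reuse objects we've already seen
    missed = [pk for key, pk in keys.items() if key not in cached]
    fetched = {o.pk: o for o in model.objects.filter(pk__in=missed)}
    # Put the freshly fetched objects back so the next call hits the cache.
    cache.set_many({'obj:%s:%s' % (model.__name__, o.pk): o for o in fetched.values()})
    objects = {keys[key]: obj for key, obj in cached.items()}
    objects.update(fetched)
    return [objects[pk] for pk in pks if pk in objects]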
Example 3
    def get_context_data(self, *args, **kwargs):
        context = super(InboxView, self).get_context_data(*args, **kwargs)

        object_list = []
        object_id_list = []
        for email in context["page_obj"].object_list:
            object_list.append(email)
            object_id_list.append(email.id)

        if len(object_id_list) == 0:
            return context

        headers = cache.get_many(object_id_list, version="email-header")

        missing_list = set(object_id_list) - set(headers.keys())
        if len(missing_list) > 0:
            missing_headers = models.Header.objects.filter(part__parent=None, part__email__in=missing_list)
            missing_headers = missing_headers.get_many("Subject", "From", group_by="part__email_id")
            headers.update(missing_headers)
            cache.set_many(missing_headers, version="email-header", timeout=None)

        for email in object_list:
            header_set = headers[email.id]
            email.subject = header_set.get("Subject")
            email.sender = header_set["From"]

        inbox = getattr(self, 'inbox_obj', None)
        if inbox is not None:
            inbox = inbox.id

        deal_with_flags.delay(object_id_list, self.request.user.id, inbox)
        return context
Example 4
def load_cache_signals(sender, **kwargs):
    """On startup, sync signals with registered models"""
    from django.db import connection

    if not cache_signals.ready:
        # Have to load directly from db, because CacheBotSignals is not prepared yet
        cursor = connection.cursor()
        try:
            cursor.execute("SELECT * FROM %s" % CacheBotSignals._meta.db_table)
        except Exception, ex:
            # This should only happen on syncdb when CacheBot tables haven't been created yet,
            # but there's not really a good way to catch this error
            sql, references = connection.creation.sql_create_model(CacheBotSignals, no_style())
            cursor.execute(sql[0])
            cursor.execute("SELECT * FROM %s" % CacheBotSignals._meta.db_table)

        results = cursor.fetchall()
        tables = [r[1] for r in results]
        mapping = cache.get_many(tables)
        for r in results:
            key = version_key(".".join(("cachesignals", r[1])))
            accessor_set = mapping.get(key)
            if accessor_set is None:
                accessor_set = set()
            accessor_set.add(r[2:5])
            mapping[key] = accessor_set
        cache.set_many(mapping, CACHE_SECONDS)
        cache_signals.ready = True
Example 5
def snippet_update_lastmod(sender, instance, **kwargs):
    """On a change to a snippet, bump its cached lastmod timestamp"""
    now = mktime(gmtime())
    lastmods = {"%s%s" % (CACHE_SNIPPET_LASTMOD_PREFIX, instance.id): now}
    for rule in instance.client_match_rules.all():
        lastmods["%s%s" % (CACHE_RULE_LASTMOD_PREFIX, rule.id)] = now
    cache.set_many(lastmods, CACHE_TIMEOUT)
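A hedged sketch of the matching read side, assuming the same key prefixes (this function is not part of the original snippet):

def get_lastmods(snippet_ids, rule_ids):
    keys = (['%s%s' % (CACHE_SNIPPET_LASTMOD_PREFIX, i) for i in snippet_ids] +
            ['%s%s' % (CACHE_RULE_LASTMOD_PREFIX, i) for i in rule_ids])
    # Entries that were never bumped (or have expired) drop out of the result.
    return cache.get_many(keys)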
Example 6
def check_token():
    """
        { codigo token expirado
        "code": 108,
        "message": "api.error.token.expired"}
    """
    url = URL + 'signin'

    dados = {"email": settings.EMAIL, "password": settings.PASS}

    headers = {
        'Access-Control-Allow-Origin': '*',
        'Content-Type': 'application/json',
        'Accept-Language': "en",
        'Date': time.strftime('%a, %d %b %Y %H:%M:%S GMT'),
        'X-Api-Key': settings.API_STRANS,
    }

    vality = cache.get('vality', None)
    token = cache.get('token', None)

    if vality is None \
        or token is None \
            or vality < datetime.now():
        r = requests.post(url, headers=headers, data=json.dumps(dados))
        resp = json.loads(r.text)
        dt = datetime.now() + timedelta(minutes=9)
        cache.set_many({'vality': dt, 'token': resp['token']})
Example 7
def get_avatars(user_ids, size=AVATAR_DEFAULT_SIZE):
    # Bulk-get for avatars: first, try to fetch them from the cache
    user_ids = set(user_ids)
    cache_keys = [bulk_cache_key(user_id, size) for user_id in user_ids]
    avatar_dict = cache.get_many(cache_keys)

    avatars = {}
    for key, avatar in avatar_dict.iteritems():
        avatars[avatar.user_id] = avatar
        user_ids.remove(avatar.user_id)

    # Fill in any avatars we don't yet have cached
    new_avatars = {}
    from avatar.models import Avatar
    for avatar in Avatar.objects.select_related('user').only('avatar', 'user__id', 'user__username').filter(primary=True, user_id__in=user_ids):
        if not avatar.thumbnail_exists(size):
            avatar.create_thumbnail(size)
        avatars[avatar.user_id] = avatar
        new_avatars[bulk_cache_key(avatar.user_id, size)] = avatar

    if new_avatars:
        # Bulk-cache the new avatars
        cache.set_many(new_avatars, 60*60*24)

    return avatars
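bulk_cache_key is used above but not defined here; a plausible, purely hypothetical sketch of such a key function (the real project may use a different scheme):

def bulk_cache_key(user_id, size):
    # One cache entry per (user, thumbnail size) pair.
    return 'avatar:%s:%s' % (user_id, size)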
Example 8
    def cached_state_set(self, state):
        """Sets both the whole state, or just a partial of it...

        Very useful, but ambiguous."""
        cached_state = self.cached_state_get()
        cached_state.update(state)
        cache.set_many(cached_state)
Example 9
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            'ascii': 'ascii_value',
            'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
            'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
            'ascii2': {'x': 1}
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            assert cache.get(key) == value

        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            assert cache.get(key) == value

        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            assert cache.get(key) == value
Example 10
 def count(self, func_name, request, ip=True, field=None, period=60):
     counters = dict((key, 0) for key in self._keys(
         func_name, request, ip, field, period))
     counters.update(cache.get_many(counters.keys()))
     for key in counters:
         counters[key] += 1
     cache.set_many(counters, timeout=period)
Example 11
    def test_unicode(self):
        # Unicode values can be cached
        stuff = {
            "ascii": "ascii_value",
            "unicode_ascii": "Iñtërnâtiônàlizætiøn1",
            "Iñtërnâtiônàlizætiøn": "Iñtërnâtiônàlizætiøn2",
            "ascii2": {"x": 1},
        }
        # Test `set`
        for (key, value) in stuff.items():
            cache.set(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `add`
        for (key, value) in stuff.items():
            cache.delete(key)
            cache.add(key, value)
            self.assertEqual(cache.get(key), value)

        # Test `set_many`
        for (key, value) in stuff.items():
            cache.delete(key)
        cache.set_many(stuff)
        for (key, value) in stuff.items():
            self.assertEqual(cache.get(key), value)
Example 12
def get_many_for_update(namespaces, keys):
    """Get cached data, full keys and create namespace counter where necessary.

    Arguments namespaces and keys should be a lists of the same size.

    Returns a pair (cached_data, full_keys),
        where cached_data is a dictionary {full_keys: data}
        and full_keys is a list of full keys, in the same order as
        given namespaces and keys.

    Use cache.set_many({full_key: value}) to finish updating.
    It is assumed namespace counters won't change in between. (if they do,
    maybe it is even better to immediately 'mark' data as invalid...)
    """
    counters = cache.get_many(namespaces)

    # Create new counters
    new_counters = {x: 1 for x in namespaces if x not in counters}
    cache.set_many(new_counters)

    # Generate list of full_keys and list of keys to read from cache.
    full_keys = []
    to_get = []
    for namespace, key in zip(namespaces, keys):
        counter = counters.get(namespace, None)
        if counter:
            full_key = make_full_key(namespace, counter, key)
            full_keys.append(full_key)
            to_get.append(full_key)
        else:
            full_keys.append(make_full_key(namespace, 1, key))

    return cache.get_many(to_get), full_keys
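As the docstring notes, the caller is expected to finish the update with cache.set_many over the returned full keys. A hedged sketch of that calling pattern (compute_value stands in for whatever work produces the data):

cached_data, full_keys = get_many_for_update(namespaces, keys)
to_set = {}
for full_key in full_keys:
    if full_key not in cached_data:
        to_set[full_key] = compute_value(full_key)  # placeholder for the real work
if to_set:
    cache.set_many(to_set)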
Example 13
def store_project_layers_info(project_key, publish, extent, resolutions, projection):
    prefix = "{0}:{1}:".format(project_key, publish)
    cache.set_many({
        prefix+'extent': ','.join(map(str, extent)),
        prefix+'resolutions': ','.join(map(str, resolutions)),
        prefix+'projection': projection
    })
Example 14
def invalidate_cache(model_class, objects, **extra_keys):
    """
    Flushes the cache of any cached objects associated with this instance.

    Explicitly set a None value instead of just deleting so we don't have any race
    conditions where:
        Thread 1 -> Cache miss, get object from DB
        Thread 2 -> Object saved, deleted from cache
        Thread 1 -> Store (stale) object fetched from DB in cache
    Five seconds should be more than enough time to prevent this from happening for
    a web app.
    """
    invalidation_dict = {}
    accessor_set = cache_signals.get_global_signals(model_class)
    for obj in objects:
        for (accessor_path, lookup_type, negate) in accessor_set:
            for value in get_values(obj, accessor_path):
                invalidation_key = get_invalidation_key(
                    model_class._meta.db_table, accessor_path=accessor_path, negate=negate, value=value, save=False
                )
                invalidation_dict[invalidation_key] = None

    invalidation_dict.update(cache.get_many(invalidation_dict.keys()))

    cache_keys = set()
    for obj_key, cache_key_list in invalidation_dict.iteritems():
        if cache_key_list:
            cache_keys.update(cache_key_list.split(","))

    keys_to_invalidate = dict([(key, None) for key in chain(cache_keys, invalidation_dict.keys())])
    keys_to_invalidate.update(extra_keys)
    cache.set_many(keys_to_invalidate, 5)
    cache.delete_many(keys_to_invalidate.keys())
Example 15
def cache_nodes(nodes):
    """
    Take a uri:content dict and cache everything at once.
    Node content is encoded together with the plugin extension and persisted as a
    native string in the cache to avoid pickling.
    """
    data = dict(_prepare_node(uri, content) for uri, content in nodes.iteritems())
    cache.set_many(data, timeout=None)
Example 16
def put(obj):
    if isinstance(obj, list):
        cache.set_many({_cache_key(o): o for o in obj})
        for o in obj:
            _process_cache[_cache_key(o)] = o
    else:
        cache.set(_cache_key(obj), obj)
        _process_cache[_cache_key(obj)] = obj
Example 17
def uncache_criteria(**kwargs):
    criteria = kwargs.get('instance')
    cache.set_many({
        CRITERIA_KEY % criteria.name: None,
        CRITERIA_FLAGS_KEY % criteria.name: None,
        CRITERIA_USERS_KEY % criteria.name: None,
        CRITERIA_GROUPS_KEY % criteria.name: None,
        ALL_CRITERIA_KEY: None}, 5)
Example 18
    def test_get_many(self):
        data = dict(zip(self.KEY_LIST, self.VALUE_LIST))

        cache.set_many(data)

        vals = cache.get_many(self.KEY_LIST)

        self.assertEqual(data, vals)
Example 19
 def count(self, func_name, request, ip=True, field=None, period=60):
     """Increment counters for all relevant cache_keys given a request."""
     counters = dict((key, 1) for key in self._keys(
         func_name, request, ip, field, period))
     counters.update(cache.get_many(counters.keys()))
     for key in counters:
         counters[key] += 1
     cache.set_many(counters, timeout=period)
Example 20
def generate_access_codes(user):
    ts = _get_timestamp()
    salt = AUTH_CODE_SALT
    codes = [formatter(user, salt, ts) for formatter in AUTH_CODE_FORMATTERS]
    formatted_codes = dict([(_gen_auth_key(code), user.id) for code in codes])
    cache.set_many(formatted_codes, AUTH_CODE_TIMEOUT)
    cache.set(_gen_user_key(user.id), codes, AUTH_CODE_TIMEOUT)
    return codes
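The function stores both a code -> user mapping and a user -> codes list. A hedged sketch of the lookup side, reusing the _gen_auth_key helper from above (the function name itself is hypothetical):

def get_user_id_for_code(code):
    # Returns the user id the code was issued to, or None once it has expired.
    return cache.get(_gen_auth_key(code))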
Example 21
    def test_set_many(self):
        data = dict(zip(self.KEY_LIST, self.VALUE_LIST))

        cache.set_many(data)

        for idx, key in enumerate(self.KEY_LIST):
            val = cache.get(key)
            self.assertEqual(val, self.VALUE_LIST[idx])
Example 22
    def diff(self, request, new_imgid, old_imgid):

        new_img = Image.objects.select_related("container_repo").prefetch_related("repo", "container_repo__platform").get(pk = new_imgid)
        old_img = Image.objects.select_related("container_repo").prefetch_related("repo", "container_repo__platform").get(pk = old_imgid)
        live_diff = (False, False)

        new_repo = new_img.container_repo
        old_repo = old_img.container_repo
        trace_reqs = _get_trace(old_repo, new_repo)
        issue_ref = []
        names = []
        for repo in new_img.repo.all():
            for i in repo.platform.issuetracker_set.all():
                if not i.name in names:
                    issue_ref.append({'name' : i.name, 're' : i.re, 'url' : i.url})
                    names.append(i.name)
        filter_repos = set(request.GET.getlist("repo"))
        filter_meta = _get_filter_meta(request.GET)

        cachekey = "%s%s%s" % ("repodiff", new_repo.id, old_repo.id)
        cached = cache.get_many([cachekey, cachekey + 'ts'])
        diff = cached.get(cachekey)
        diffts = cached.get(cachekey + 'ts')

        if diff is None:
            diff = _diff_sacks(new_repo, old_repo)
            diffts = datetime.datetime.now()
            cachelife = (60 * 3) if (live_diff[0] or live_diff[1]) else (60 * 60 * 24)
            cache.set_many({cachekey: diff, cachekey + 'ts' : diffts}, cachelife)

        diff = _sort_filter_diff(diff, pkgs=list(set(new_img.packages) | set(old_img.packages)), repos=filter_repos, meta=filter_meta )

        title = "Comparing Images"
        is_popup = request.GET.get('is_popup', False)

        full_path = "%s?" % request.path
        for query, values in request.GET.lists():
            full_path += "&".join([ '%s=%s' % (query, val) for val in values ])

        full_path += "&"

        context = {
            "title": title,
            "opts": self.model._meta,
            "app_label": self.model._meta.app_label,
            'diff' : diff,
            'diffts' : diffts,
            'live_diff' : live_diff,
            'new_obj' : new_img,
            'old_obj' : old_img,
            'is_popup' : is_popup,
            'trace': trace_reqs,
            'issue_ref'  : json.dumps(issue_ref),
            'packagemetatypes' : list(PackageMetaType.objects.all().prefetch_related("choices")),
            "full_path" : full_path,
            }

        return TemplateResponse(request, 'diff.html', context = context, current_app=self.admin_site.name)
Example 23
 def jump(self, date=None, speed=None):
     if not self._enabled:
         raise RuntimeError(u'Timewarp is disabled.')
     cache.set_many({
         u'timewarp.warped_from': self.real_time,
         u'timewarp.warped_to': time_orig.mktime(date.timetuple()) if date is not None else self.warped_time,
         u'timewarp.speedup': speed if speed is not None else self.speedup,
         }, timeout=None)
     self._lastupdate = None
Example 24
def uncache_flag(**kwargs):
    flag = kwargs.get('instance')
    data = {
        keyfmt(FLAG_CACHE_KEY, flag.name): None,
        keyfmt(FLAG_USERS_CACHE_KEY, flag.name): None,
        keyfmt(FLAG_GROUPS_CACHE_KEY, flag.name): None,
        keyfmt(FLAGS_ALL_CACHE_KEY): None
    }
    cache.set_many(data, 5)
Example 25
 def push_to_remote(self):
     """
     Update the remote cache
     """
     if self._max_scores_updates:
         cache.set_many(
             {self._remote_cache_key(key): value for key, value in self._max_scores_updates.items()},
             60 * 60 * 24,  # 1 day
         )
Example 26
	def incr_many(self, keys):
		values = cache.get_many(keys)
		for key in keys:
			if key in values:
				values[key] += 1
			else:
				values[key] = 0

		cache.set_many(values, self.timeout)
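A hedged usage sketch (the instance name and key format are placeholders):

# Bump hit counters for a handful of endpoints in one round trip.
limiter.incr_many(['hits:/api/search', 'hits:/api/users'])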
Example 27
def uncache_flag(**kwargs):
    flag = kwargs.get('instance')
    data = {
        FLAG_CACHE_KEY.format(n=hashlib.md5(flag.name).hexdigest()): None,
        FLAG_USERS_CACHE_KEY.format(n=hashlib.md5(flag.name).hexdigest()): None,
        FLAG_GROUPS_CACHE_KEY.format(n=hashlib.md5(flag.name).hexdigest()): None,
        FLAGS_ALL_CACHE_KEY: None
    }
    cache.set_many(data, 5)
Example 28
 def test_set_many(self):
     if not self._run_test():
         return
     cache.set_many({'hello1':'world1','hello2':'world2'},30)
     self.assertEquals(len(cache._logger.log), 0)
     self.assertEquals(cache.get("hello1"), "world1")
     self.assertEquals(len(cache._logger.log), 0)
     cache.close()
     self.assertEquals(len(cache._logger.log), 1)
Example 29
    def transform(self, tuples, cache_key=None, dict_key='pk',
                        empty_value=-1):
        """
        data - список pk, tuple или list, в которых pk или нулевой элемент должен быть заменен на объект
        Заменяет ID модели на сам объект
        tuples - список tuples или значений.
        Если tuple, то ID - нулевой
        """
        if not tuples or not len(tuples):
            return tuples

        # Prepare the cache keys
        is_list = type(tuples[0]) in [tuple, list]
        is_tuple = is_list and type(tuples[0]) is tuple
        # Mapping id -> cache key
        keys = {}
        for t in tuples:
            if is_list:
                keys[t[0]] = self.key(cache_key, kwargs={dict_key: t[0]})
            else:
                keys[t] = self.key(cache_key, kwargs={dict_key: t})
        # Load all keys from the cache
        values = cache.get_many(keys.values())

        # Prepare keys to load from the DB
        objects = {}
        if len(values) != len(keys):
            # keys that were not found in the cache
            not_loaded = set(keys.values()).difference(set(values.keys()))
            if not_loaded:
                # Invert the mapping so a key can be resolved back to an id
                keys_un = dict((value, key) for key, value in keys.items())
                not_loaded = [keys_un[x] for x in not_loaded]
                objects = {o.id: o for o in self.get_query_set().filter(**{dict_key+'__in': not_loaded})}

        # Build the result list of objects
        result = []
        cache_set = {}
        for t in tuples:
            id = is_list and t[0] or t
            ck = keys[id]
            value = values.get(ck, empty_value)
            if value is empty_value:
                value = objects.get(id, empty_value)
                if value is empty_value:
                    continue
                cache_set[ck] = value
            if is_list:
                if is_tuple:
                    result.append((value,) + t[1:])
                else:
                    result.append([value] + t[1:])
            else:
                result.append(value)
        if cache_set:
            cache.set_many(cache_set)
        return result
Example 30
    def test_cluster_admin_counts_cache(self):
        """ tests the cache for the admin cluster counts on the status page
        these tests will fail if cache is not configured

        Verifies:
            * existing values are updated
            * any of the dict keys can be updated
            * keys not in the cache are discarded
        """
        ids = ["cluster_admin_0", "cluster_admin_1", "cluster_admin_2"]

        data = {
            "cluster_admin_0": {"orphaned": 1, "missing": 2, "ready_to_import": 3},
            "cluster_admin_1": {"orphaned": 4, "missing": 5, "ready_to_import": 6},
        }
        cache.set_many(data)

        update = {0: 4, 1: 5, 3: 6}

        # update orphaned
        update_vm_counts("orphaned", update)
        cached = cache.get_many(ids)
        self.assertTrue("cluster_admin_0" in cached)
        self.assertTrue("cluster_admin_1" in cached)
        self.assertFalse("cluster_admin_2" in cached)
        self.assertEqual(5, cached["cluster_admin_0"]["orphaned"])
        self.assertEqual(2, cached["cluster_admin_0"]["missing"])
        self.assertEqual(3, cached["cluster_admin_0"]["ready_to_import"])
        self.assertEqual(9, cached["cluster_admin_1"]["orphaned"])
        self.assertEqual(5, cached["cluster_admin_1"]["missing"])
        self.assertEqual(6, cached["cluster_admin_1"]["ready_to_import"])

        # update orphaned
        update_vm_counts("missing", update)
        cached = cache.get_many(ids)
        self.assertTrue("cluster_admin_0" in cached)
        self.assertTrue("cluster_admin_1" in cached)
        self.assertFalse("cluster_admin_2" in cached)
        self.assertEqual(5, cached["cluster_admin_0"]["orphaned"])
        self.assertEqual(6, cached["cluster_admin_0"]["missing"])
        self.assertEqual(3, cached["cluster_admin_0"]["ready_to_import"])
        self.assertEqual(9, cached["cluster_admin_1"]["orphaned"])
        self.assertEqual(10, cached["cluster_admin_1"]["missing"])
        self.assertEqual(6, cached["cluster_admin_1"]["ready_to_import"])

        # update ready_to_import
        update_vm_counts("ready_to_import", update)
        cached = cache.get_many(ids)
        self.assertTrue("cluster_admin_0" in cached)
        self.assertTrue("cluster_admin_1" in cached)
        self.assertFalse("cluster_admin_2" in cached)
        self.assertEqual(5, cached["cluster_admin_0"]["orphaned"])
        self.assertEqual(6, cached["cluster_admin_0"]["missing"])
        self.assertEqual(7, cached["cluster_admin_0"]["ready_to_import"])
        self.assertEqual(9, cached["cluster_admin_1"]["orphaned"])
        self.assertEqual(10, cached["cluster_admin_1"]["missing"])
        self.assertEqual(11, cached["cluster_admin_1"]["ready_to_import"])
Example 31
def get_users_for_authors(organization_id, authors, user=None):
    """
    Returns a dictionary of author_id => user, if a Sentry
    user object exists for that email. If there is no matching
    Sentry user, a {user, email} dict representation of that
    author is returned.
    e.g.
    {
        1: serialized(<User id=1>),
        2: {email: '*****@*****.**', name: 'dunno'},
        ...
    }
    """
    results = {}

    fetched = cache.get_many([
        _user_to_author_cache_key(organization_id, author)
        for author in authors
    ])
    if fetched:
        missed = []
        for author in authors:
            fetched_user = fetched.get(
                _user_to_author_cache_key(organization_id, author))
            if fetched_user is None:
                missed.append(author)
            else:
                results[str(author.id)] = fetched_user
    else:
        missed = authors

    if missed:
        # Filter users based on the emails provided in the commits
        user_emails = list(
            UserEmail.objects.filter(
                in_iexact("email", [a.email for a in missed])).order_by("id"))

        # Filter users belonging to the organization associated with
        # the release
        users = User.objects.filter(
            id__in={ue.user_id
                    for ue in user_emails},
            is_active=True,
            sentry_orgmember_set__organization_id=organization_id,
        )
        users = serialize(list(users), user)
        users_by_id = {user["id"]: user for user in users}
        # Figure out which email address matches to a user
        users_by_email = {}
        for email in user_emails:
            # force emails to lower case so we can do case insensitive matching
            lower_email = email.email.lower()
            if lower_email not in users_by_email:
                user = users_by_id.get(str(email.user_id), None)
                # user can be None if there's a user associated
                # with user_email in separate organization
                if user:
                    users_by_email[lower_email] = user
        to_cache = {}
        for author in missed:
            results[str(author.id)] = users_by_email.get(
                author.email.lower(), {
                    "name": author.name,
                    "email": author.email
                })
            to_cache[_user_to_author_cache_key(
                organization_id, author)] = results[str(author.id)]
        cache.set_many(to_cache)

    metrics.incr("sentry.release.get_users_for_authors.missed",
                 amount=len(missed))
    metrics.incr("sentry.tasks.process_suspect_commits.total",
                 amount=len(results))
    return results
Example 32
 def set_many(self, values, timeout=None):
     raw_values = dict((self._prefix_key(key), value)
                       for (key, value) in values.items())
     cache.set_many(raw_values, timeout)
     self._cache_data.update(values)
Example 33
 def set_sessions_active(cls, session_ids):
     data = {cls.ACTIVE_CACHE_KEY_PREFIX.format(i): i for i in session_ids}
     cache.set_many(data, timeout=5 * 60)
Example 34
 def test_clear(self):
     # The cache can be emptied using clear
     cache.set_many({'key1': 'spam', 'key2': 'eggs'})
     cache.clear()
     self.assertIsNone(cache.get('key1'))
     self.assertIsNone(cache.get('key2'))
Example 35
    def test_get_many(self, mock_warning, mock_info):
        programs = ProgramFactory.create_batch(3)

        # Cache details for 2 of 3 programs.
        partial_programs = {
            PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program
            for program in programs[:2]
        }
        cache.set_many(partial_programs, None)

        # When called before UUIDs are cached, the function should return an
        # empty list and log a warning.
        self.assertEqual(get_programs(self.site), [])
        mock_warning.assert_called_once_with(
            'Failed to get program UUIDs from the cache for site {}.'.format(
                self.site.domain))
        mock_warning.reset_mock()

        # Cache UUIDs for all 3 programs.
        cache.set(
            SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=self.site.domain),
            [program['uuid'] for program in programs], None)

        actual_programs = get_programs(self.site)

        # The 2 cached programs should be returned while info and warning
        # messages should be logged for the missing one.
        self.assertEqual(
            set(program['uuid'] for program in actual_programs),
            set(program['uuid'] for program in partial_programs.values()))
        mock_info.assert_called_with(
            'Failed to get details for 1 programs. Retrying.')
        mock_warning.assert_called_with(
            'Failed to get details for program {uuid} from the cache.'.format(
                uuid=programs[2]['uuid']))
        mock_warning.reset_mock()

        # We can't use a set comparison here because these values are dictionaries
        # and aren't hashable. We've already verified that all programs came out
        # of the cache above, so all we need to do here is verify the accuracy of
        # the data itself.
        for program in actual_programs:
            key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
            self.assertEqual(program, partial_programs[key])

        # Cache details for all 3 programs.
        all_programs = {
            PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program
            for program in programs
        }
        cache.set_many(all_programs, None)

        actual_programs = get_programs(self.site)

        # All 3 programs should be returned.
        self.assertEqual(
            set(program['uuid'] for program in actual_programs),
            set(program['uuid'] for program in all_programs.values()))
        self.assertFalse(mock_warning.called)

        for program in actual_programs:
            key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
            self.assertEqual(program, all_programs[key])
Example 36
    def test_get_many(self, mock_warning, mock_info):
        pathways = CreditPathwayFactory.create_batch(3)

        # Cache details for 2 of 3 pathways.
        partial_pathways = {
            CREDIT_PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway
            for pathway in pathways[:2]
        }
        cache.set_many(partial_pathways, None)

        # When called before pathways are cached, the function should return an
        # empty list and log a warning.
        self.assertEqual(get_credit_pathways(self.site), [])
        mock_warning.assert_called_once_with(
            'Failed to get credit pathway ids from the cache.')
        mock_warning.reset_mock()

        # Cache ids for all 3 pathways
        cache.set(
            SITE_CREDIT_PATHWAY_IDS_CACHE_KEY_TPL.format(
                domain=self.site.domain),
            [pathway['id'] for pathway in pathways], None)

        actual_pathways = get_credit_pathways(self.site)

        # The 2 cached pathways should be returned while info and warning
        # messages should be logged for the missing one.
        self.assertEqual(
            set(pathway['id'] for pathway in actual_pathways),
            set(pathway['id'] for pathway in partial_pathways.values()))
        mock_info.assert_called_with(
            'Failed to get details for 1 pathways. Retrying.')
        mock_warning.assert_called_with(
            'Failed to get details for credit pathway {id} from the cache.'.
            format(id=pathways[2]['id']))
        mock_warning.reset_mock()

        # We can't use a set comparison here because these values are dictionaries
        # and aren't hashable. We've already verified that all pathways came out
        # of the cache above, so all we need to do here is verify the accuracy of
        # the data itself.
        for pathway in actual_pathways:
            key = CREDIT_PATHWAY_CACHE_KEY_TPL.format(id=pathway['id'])
            self.assertEqual(pathway, partial_pathways[key])

        # Cache details for all 3 pathways.
        all_pathways = {
            CREDIT_PATHWAY_CACHE_KEY_TPL.format(id=pathway['id']): pathway
            for pathway in pathways
        }
        cache.set_many(all_pathways, None)

        actual_pathways = get_credit_pathways(self.site)

        # All 3 pathways should be returned.
        self.assertEqual(
            set(pathway['id'] for pathway in actual_pathways),
            set(pathway['id'] for pathway in all_pathways.values()))
        self.assertFalse(mock_warning.called)

        for pathway in actual_pathways:
            key = CREDIT_PATHWAY_CACHE_KEY_TPL.format(id=pathway['id'])
            self.assertEqual(pathway, all_pathways[key])
Example 37
 def test_set_many(self):
     """Test the set_many cache operation"""
     cache.set_many({"a": 1, "b": 2, "c": 3})
     res = cache.get_many(["a", "b", "c"])
     self.assertEqual(res, {"a": 1, "b": 2, "c": 3})
Example 38
    def diff(self, request, new_imgid, old_imgid):

        new_img = Image.objects.select_related(
            "container_repo").prefetch_related(
                "repo", "container_repo__platform").get(pk=new_imgid)
        old_img = Image.objects.select_related(
            "container_repo").prefetch_related(
                "repo", "container_repo__platform").get(pk=old_imgid)
        live_diff = (False, False)

        new_repo = new_img.container_repo
        old_repo = old_img.container_repo
        trace_reqs = _get_trace(old_repo, new_repo)
        issue_ref = []
        names = []
        for repo in new_img.repo.all():
            for i in repo.platform.issuetracker_set.all():
                if not i.name in names:
                    issue_ref.append({
                        'name': i.name,
                        're': i.re,
                        'url': i.url
                    })
                    names.append(i.name)
        filter_repos = set(request.GET.getlist("repo"))
        filter_meta = _get_filter_meta(request.GET)

        cachekey = "%s%s%s" % ("repodiff", new_repo.id, old_repo.id)
        cached = cache.get_many([cachekey, cachekey + 'ts'])
        diff = cached.get(cachekey)
        diffts = cached.get(cachekey + 'ts')

        if diff is None:
            diff = _diff_sacks(new_repo, old_repo)
            diffts = datetime.datetime.now()
            cachelife = (60 * 3) if (live_diff[0] or live_diff[1]) else (60 * 60 * 24)
            cache.set_many({
                cachekey: diff,
                cachekey + 'ts': diffts
            }, cachelife)

        diff = _sort_filter_diff(
            diff,
            pkgs=list(set(new_img.packages) | set(old_img.packages)),
            repos=filter_repos,
            meta=filter_meta)

        title = "Comparing Images"
        is_popup = request.GET.get('is_popup', False)

        full_path = "%s?" % request.path
        for query, values in request.GET.lists():
            full_path += "&".join(['%s=%s' % (query, val) for val in values])

        full_path += "&"

        context = {
            "title": title,
            "opts": self.model._meta,
            "app_label": self.model._meta.app_label,
            'diff': diff,
            'diffts': diffts,
            'live_diff': live_diff,
            'new_obj': new_img,
            'old_obj': old_img,
            'is_popup': is_popup,
            'trace': trace_reqs,
            'issue_ref': json.dumps(issue_ref),
            'packagemetatypes': list(PackageMetaType.objects.all().prefetch_related("choices")),
            "full_path": full_path,
        }

        return TemplateResponse(request,
                                'diff.html',
                                context=context,
                                current_app=self.admin_site.name)
Example 39
 def test_set_many(self):
     # Multiple keys can be set using set_many
     cache.set_many({'key1': 'spam', 'key2': 'eggs'})
     self.assertEqual(cache.get('key1'), 'spam')
     self.assertEqual(cache.get('key2'), 'eggs')
Example 40
 def test_delete_with_prefix_with_no_reverse_works(self):
     cache.set_many({'K1': 'value', 'K2': 'value2', 'B2': 'Anothervalue'})
     assert cache.delete_with_prefix('K') == 2
     assert cache.get_many(['K1', 'K2', 'B2']) == {'B2': 'Anothervalue'}
Example 41
def get_cached_objects(pks, model=None, timeout=CACHE_TIMEOUT, missing=RAISE):
    """
    Return a list of objects with given PKs using cache.

    Params:
        pks - list of Primary Key values to look up or list of content_type_id, pk tuples
        model - ContentType instance representing the model's class or the model class itself
        timeout - TTL for the items in cache, defaults to CACHE_TIMEOUT
        missing - behaviour when an object cannot be found: RAISE (default, propagate
            DoesNotExist), NONE (insert None in its place) or SKIP (leave it out)

    Throws:
        model.DoesNotExist is propagated from content_type.get_object_for_this_type
    """
    if model is not None:
        if not isinstance(model, ContentType):
            model = ContentType.objects.get_for_model(model)
        pks = [(model, pk) for pk in pks]
    else:
        pks = [(ContentType.objects.get_for_id(ct_id), pk) for (ct_id, pk) in pks]

    keys = [_get_key(KEY_PREFIX, model, pk=pk) for (model, pk) in pks]

    cached = cache.get_many(keys)

    # keys not in cache
    keys_to_set = set(keys) - set(cached.keys())
    if keys_to_set:
        # build lookup to get model and pks from the key
        lookup = dict(zip(keys, pks))

        to_get = {}
        # group lookups by CT so we can do in_bulk
        for k in keys_to_set:
            ct, pk = lookup[k]
            to_get.setdefault(ct, {})[int(pk)] = k

        # take out all the publishables
        publishable_ct = ContentType.objects.get_for_model(get_model('core', 'publishable'))
        if publishable_ct in to_get:
            publishable_keys = to_get.pop(publishable_ct)
            models = publishable_ct.model_class()._default_manager.values('content_type_id', 'id').filter(id__in=publishable_keys.keys())
            for m in models:
                ct = ContentType.objects.get_for_id(m['content_type_id'])
                pk = m['id']
                # and put them back as their native content_type
                to_get.setdefault(ct, {})[pk] = publishable_keys[pk]

        to_set = {}
        # retrieve all the models from DB
        for ct, vals in to_get.items():
            models = ct.model_class()._default_manager.in_bulk(vals.keys())
            for pk, m in models.items():
                k = vals[pk]
                cached[k] = to_set[k] = m

        if not isinstance(cache, DummyCache):
            # write them into cache
            cache.set_many(to_set, timeout=timeout)

    out = []
    for k in keys:
        try:
            out.append(cached[k])
        except KeyError:
            if missing == NONE:
                out.append(None)
            elif missing == SKIP:
                pass
            elif missing == RAISE:
                ct = ContentType.objects.get_for_id(int(k.split(':')[1]))
                raise ct.model_class().DoesNotExist(
                    '%s matching query does not exist.' % ct.model_class()._meta.object_name)
    return out
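A hedged usage sketch of the three missing modes handled at the end of the function (Article is a placeholder model):

objs = get_cached_objects([1, 2, 3], model=Article)                # RAISE (default): a missing pk raises DoesNotExist
objs = get_cached_objects([1, 2, 3], model=Article, missing=NONE)  # pads missing entries with None
objs = get_cached_objects([1, 2, 3], model=Article, missing=SKIP)  # silently drops missing entries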
Example 42
    def get_cached(self,
                   slugs=None,
                   pks=None,
                   select_related_media_tag=True,
                   portal_id=None):
        """
        Gets all ideas defined by either `slugs` or `pks`.

        `slugs` and `pks` may be a list or tuple of identifiers to use for
        request where the elements are of type string / unicode or int,
        respectively. You may provide a single string / unicode or int directly
        to query only one object.

        :returns: An instance or a list of instances of :class:`CosinnusGroup`.
        :raises: If a single object is defined a `CosinnusGroup.DoesNotExist`
            will be raised in case the requested object does not exist.
        """
        if portal_id is None:
            portal_id = CosinnusPortal.get_current().id

        # Check that exactly one of slugs and pks is set
        assert not (slugs and pks)
        assert slugs or pks

        if slugs is not None:
            if isinstance(slugs, six.string_types):
                # We request a single idea
                slugs = [slugs]

            # We request multiple ideas by slugs
            keys = [self._IDEAS_SLUG_CACHE_KEY % (portal_id, s) for s in slugs]
            ideas = cache.get_many(keys)
            missing = [key.split('/')[-1] for key in keys if key not in ideas]
            if missing:
                # we can only find ideas via this function that are in the same portal we run in
                query = self.get_queryset().filter(portal__id=portal_id,
                                                   is_active=True,
                                                   slug__in=missing)
                if select_related_media_tag:
                    query = query.select_related('media_tag')

                for idea in query:
                    ideas[self._IDEAS_SLUG_CACHE_KEY %
                          (portal_id, idea.slug)] = idea
                cache.set_many(ideas, settings.COSINNUS_IDEA_CACHE_TIMEOUT)

            # sort with a collation-aware key that handles umlauts etc., case-insensitively
            idea_list = list(ideas.values())
            idea_list = sorted(idea_list, key=sort_key_strcoll_attr('name'))
            return idea_list

        elif pks is not None:
            if isinstance(pks, int):
                # We request a single idea
                pks = [pks]
            # We request one or more ideas by pk: translate pks to slugs via the
            # cached pk -> slug map, then reuse the slug lookup above.
            cached_pks = self.get_pks(portal_id=portal_id)
            slugs = [
                _f for _f in (cached_pks.get(pk, []) for pk in pks) if _f
            ]
            if slugs:
                return self.get_cached(slugs=slugs, portal_id=portal_id)
            return []  # We rely on the slug and id maps being up to date
        return []
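A hedged usage sketch of the two lookup modes (attaching this manager to an Idea model, and the slug values, are assumptions):

# By slug (a list or a single string), or by primary key.
ideas = Idea.objects.get_cached(slugs=['open-data', 'green-roofs'])
ideas = Idea.objects.get_cached(pks=[3, 7, 11])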
Example 43
 def test_set_many_returns_empty_list_on_success(self):
     """set_many() returns an empty list when all keys are inserted."""
     failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
     self.assertEqual(failing_keys, [])
Example 44
 def handle(self, *args, **options):
     top_users = Profile.objects.by_rating()[:3]
     top_tags = Tag.objects.hottest()[:3]
     cache.set_many({'top_users': top_users, 'top_tags': top_tags})
Example 45
 def count(self, request, ip=True, field=None, period=60):
     counters = dict((key, 0) for key in self._keys(request, ip, field))
     counters.update(cache.get_many(counters.keys()))
     for key in counters:
         counters[key] += 1
     cache.set_many(counters, timeout=period)
Example 46
def get_instance(model, instance_or_pk, timeout=None, using=None):
    """
    Returns the ``model`` instance with a primary key of ``instance_or_pk``.

    If the data is cached it will be returned from there, otherwise the regular
    Django ORM is queried for this instance and the data stored in the cache.

    If omitted, the timeout value defaults to
    ``settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT`` instead of 0 (zero).

    Example::

        >>> get_instance(User, 1) # Cache miss
        <User: lamby>
        >>> get_instance(User, 1) # Cache hit
        <User: lamby>
        >>> User.objects.get(pk=1) == get_instance(User, 1)
        True
    """

    pk = getattr(instance_or_pk, "pk", instance_or_pk)

    primary_model = model
    descriptors = getattr(primary_model, "_cache_fetch_related", ())
    models = [model, *(d.related.field.model for d in descriptors)]
    # Note: we're assuming that the relations are primary key foreign keys, and
    # so all have the same primary key. This matches the assumption which
    # `cache_relation` makes.
    keys_to_models = {instance_key(model, instance_or_pk): model for model in models}

    data_map = cache.get_many(tuple(keys_to_models.keys()))
    instance_map = {}

    if data_map.keys() == keys_to_models.keys():
        try:
            for key, data in data_map.items():
                model = keys_to_models[key]
                instance_map[key] = deserialise(model, data, pk, using)
        except:
            # Error when deserialising - remove from the cache; we will
            # fallback and return the underlying instance
            cache.delete_many(tuple(keys_to_models.keys()))

        else:
            key = instance_key(primary_model, instance_or_pk)
            primary_instance = instance_map[key]

            for descriptor in descriptors:
                related_instance = instance_map[
                    instance_key(
                        descriptor.related.field.model,
                        instance_or_pk,
                    )
                ]
                related_cache_name = get_related_cache_name(
                    get_related_name(descriptor),
                )
                setattr(primary_instance, related_cache_name, related_instance)

            return primary_instance

    related_names = [d.related.field.related_query_name() for d in descriptors]

    # Use the default manager so we are never filtered by a .get_query_set()
    queryset = primary_model._default_manager.using(using)
    if related_names:
        # NB: select_related without args selects all it can find, which we don't want.
        queryset = queryset.select_related(*related_names)
    primary_instance = queryset.get(pk=pk)

    instances = [
        primary_instance,
        *(getattr(primary_instance, x, None) for x in related_names),
    ]

    cache_data = {}
    for instance in instances:
        if instance is None:
            continue

        key = instance_key(instance._meta.model, instance)
        cache_data[key] = serialise(instance)

    if timeout is None:
        timeout = app_settings.CACHE_TOOLBOX_DEFAULT_TIMEOUT

    cache.set_many(cache_data, timeout)

    return primary_instance
Example 47
def _add_entity_to_memcache(model, entity, identifiers):
    cache.set_many({x: entity
                    for x in identifiers},
                   timeout=CACHE_TIMEOUT_SECONDS)
Example 48
    def post(self, request, *args, **kwargs):
        errors = []

        try:
            request_id = str(uuid4())

            file_opts = {}

            targetgenes_file, targetgenes_source = get_file(
                request, "targetgenes", gene_lists_storage)
            filter_tfs_file, filter_tfs_source = get_file(request, "filtertfs")
            target_networks, networks_source = get_file(
                request, "targetnetworks", networks_storage)
            background_genes_file, background_genes_source = get_file(
                request, "backgroundgenes")

            if background_genes_file:
                background_genes = get_background_genes(background_genes_file)
                file_opts['target_filter_list'] = background_genes
                cache.set(f'{request_id}/background_genes', background_genes)

            if targetgenes_file:
                user_lists = get_gene_lists(targetgenes_file)

                if 'target_filter_list' in file_opts:
                    user_lists = filter_gene_lists_by_background(
                        user_lists, file_opts['target_filter_list'])

                bad_genes = check_annotations(user_lists[0].index)
                if bad_genes and targetgenes_source != 'storage':
                    errors.append(
                        f'Genes in Target Genes File not in database: {", ".join(bad_genes)}'
                    )

                if not target_networks:
                    cache.set(f'{request_id}/target_genes',
                              user_lists)  # cache the user list here

                file_opts["user_lists"] = user_lists

            if filter_tfs_file:
                filter_tfs = get_genes(filter_tfs_file)
                file_opts["tf_filter_list"] = filter_tfs

                bad_genes = check_annotations(filter_tfs)
                if bad_genes and filter_tfs_source != 'storage':
                    errors.append(
                        f'Genes in Filter TFs File not in database: {", ".join(bad_genes)}'
                    )

            if target_networks:
                network = get_network(
                    target_networks,
                    headers=request.POST.get('networkHeaders') == 'true')

                bad_genes = check_annotations(network[1]['source'].append(
                    network[1]['target']))
                if bad_genes and networks_source != 'storage':
                    errors.append(
                        f'Genes in Network File not in database: {", ".join(bad_genes)}'
                    )

                try:
                    user_lists = file_opts["user_lists"]
                    # merge network with current user_lists
                    network, user_lists = merge_network_lists(
                        network, user_lists)
                except KeyError:
                    user_lists = network_to_lists(network)

                file_opts["user_lists"] = user_lists

                try:
                    tf_filter_list = file_opts["tf_filter_list"]
                    network, tf_filter_list = merge_network_filter_tfs(
                        network, tf_filter_list)
                except KeyError:
                    tf_filter_list = network_to_filter_tfs(network)

                file_opts["tf_filter_list"] = tf_filter_list

                cache.set_many({
                    f'{request_id}/target_network': network,
                    f'{request_id}/target_genes': user_lists
                })

            edges = request.POST.getlist('edges')
            query = request.POST['query']

            result, metadata, stats, _uid, ids = get_query_result(
                query=query,
                edges=edges,
                size_limit=50_000_000,
                uid=request_id,
                **file_opts)
            columns, merged_cells, result_list = format_data(
                result, stats, metadata, ids)
            metadata = metadata_to_dict(metadata)

            cache.set_many({
                f'{request_id}/query':
                query.strip() + '\n',  # save queries
                f'{request_id}/formatted_tabular_output':
                (columns, merged_cells, result_list, metadata),
            })

            res = {
                'result': {
                    'data': result_list,
                    'mergeCells': merged_cells,
                    'columns': columns
                },
                'metadata': metadata,
                'request_id': request_id,
                'analysis_ids': list(ids.items())
            }

            if errors:
                res['errors'] = errors

            return JsonResponse(res, encoder=PandasJSONEncoder)
        except (QueryError, BadFile) as e:
            return HttpResponseBadRequest(e)
        except ValueError as e:
            return HttpResponseNotFound(f'Query not available: {e}',
                                        content_type='text/plain')
        except MultiValueDictKeyError as e:
            return HttpResponseBadRequest(f"Problem with query: {e}")
Example 49
def search_flights(request):
    """
	Search flights based on Airport code ,date,travellers,seating class
	"""
    GO = goibiboAPI(settings.API_USERNAME, settings.API_PASSWORD)
    try:
        trip = request.POST.get('trip', request.COOKIES.get('trip'))
        startdate = request.POST.get('start', request.COOKIES.get('start'))
        date, month, year = startdate.split('/')
        departure = year + month + date
        source = request.POST.get('iata_code_source',
                                  request.COOKIES.get('s_code')).strip()
        destination = request.POST.get('iata_code_destination',
                                       request.COOKIES.get('d_code')).strip()
        adult = request.POST.get('f_adults', request.COOKIES.get('adults'))
        child = request.POST.get('f_childs',
                                 request.COOKIES.get('childs', '0'))
        infant = request.POST.get('f_infants',
                                  request.COOKIES.get('infants', '0'))
        s_class = request.POST.get('f_class', request.COOKIES.get('class'))
        source_city = request.POST.get('flight_source',
                                       request.COOKIES.get('s_city'))
        destination_city = request.POST.get('flight_destination',
                                            request.COOKIES.get('d_city'))
    except:
        messages.add_message(request, messages.INFO,
                             'User Entered data Incorrect')
        return HttpResponseRedirect(format_redirect_url("/", 'error=101'))
    if 'round' in trip:
        enddate = request.POST.get('end', request.COOKIES.get('end'))
        date, month, year = enddate.split('/')
        arrival = year + month + date
        # this is return url accessing query and response from api
        query, search_flights_response = GO.SearchFlights(
            trip, departure, source, destination, adult, child, infant,
            s_class, arrival)
        # split onwardflight data and return flight data from search_flights_response
        onwardflight = search_flights_response['data']['onwardflights']
        returnflight = search_flights_response['data']['returnflights']
        if onwardflight:
            # Sort flights by price
            sorted_onward_flights = sorted(
                onwardflight, key=lambda x: x['fare']['totalfare'])
            sorted_return_flights = sorted(
                returnflight, key=lambda x: x['fare']['totalfare'])
            # Zip them together for template convenience
            zipped = zip(sorted_onward_flights, sorted_return_flights)
            # stored flight details in redis cache based on search queries
            # Flight row id is unique value
            caching_dict = {}
            for onwardflights, returnflights in zipped:
                if 'rowid' in onwardflights:
                    rowid = onwardflights['rowid']
                    caching_dict[rowid] = onwardflights
                if 'rowid' in returnflights:
                    rowid = returnflights['rowid']
                    caching_dict[rowid] = returnflights
            # stored redis cache
            cache.set_many(caching_dict)
        else:
            messages.add_message(
                request, messages.INFO,
                'No flights available for this source and destination')
            return HttpResponseRedirect(format_redirect_url("/", 'error=102'))
    else:
        query, search_flights_response = GO.SearchFlights(
            trip, departure, source, destination, adult, child, infant,
            s_class)
        onwardflight = search_flights_response['data']['onwardflights']
        if onwardflight:
            # Sort flights by price
            sorted_onward_flights = sorted(
                onwardflight, key=lambda x: x['fare']['totalfare'])
            # stored flight details in redis cache based on search queries
            # Flight row id is unique value
            caching_dict = {}
            for onwardflights in onwardflight:
                if 'rowid' in onwardflights:
                    rowid = onwardflights['rowid']
                    caching_dict[rowid] = onwardflights
            # stored redis cache
            cache.set_many(caching_dict)
        else:
            messages.add_message(
                request, messages.INFO,
                'No flights available for this source and destination')
            return HttpResponseRedirect(format_redirect_url("/", 'error=103'))
    if 'oneway' in trip:
        response = render_to_response('v2/flight/flight_search_list.html', {
            'flight': sorted_onward_flights,
            'source': source_city,
            'destination': destination_city,
            'dateofdeparture': startdate,
            'trip': trip,
            'iata_code_return': destination,
            'iata_code_onward': source
        },
                                      context_instance=RequestContext(request))
    else:
        response = render_to_response(
            'v2/flight/flight_search_list.html', {
                'flight': zipped,
                'source': request.POST.get('flight_source', ''),
                'destination': request.POST.get('flight_destination', ''),
                'dateofarrival': enddate,
                'dateofdeparture': startdate,
                'trip': trip,
                'iata_code_return': destination,
                'iata_code_onward': source
            },
            context_instance=RequestContext(request))
        response.set_cookie('end', enddate)

    response.set_cookie('trip', trip)
    response.set_cookie('start', startdate)
    response.set_cookie('s_code', source)
    response.set_cookie('d_code', destination)
    response.set_cookie('adults', adult)
    response.set_cookie('childs', child)
    response.set_cookie('infants', infant)
    response.set_cookie('class', s_class)
    response.set_cookie('s_city', source_city)
    response.set_cookie('d_city', destination_city)
    return response
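A hedged follow-up sketch (not part of the view above): the search view only writes the flight rows into the cache, so a later step would read a chosen row back by its rowid. The select_flight name, the 'rowid' POST parameter and the flight_detail.html template are assumptions for illustration only.

def select_flight(request):
    # The rowid posted from the results page is the key used by cache.set_many above.
    rowid = request.POST.get('rowid')
    flight = cache.get(rowid)
    if flight is None:
        # The cached search results expired or were evicted; send the user back to search.
        return HttpResponseRedirect("/")
    return render_to_response(
        'v2/flight/flight_detail.html', {'flight': flight},
        context_instance=RequestContext(request))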
Esempio n. 50
0
    def bulk_get_rule_status(
            self, rules: Sequence[Rule]) -> Mapping[int, GroupRuleStatus]:
        keys = [self._build_rule_status_cache_key(rule.id) for rule in rules]
        cache_results: Mapping[str, GroupRuleStatus] = cache.get_many(keys)
        missing_rule_ids: Set[int] = set()
        rule_statuses: MutableMapping[int, GroupRuleStatus] = {}
        for key, rule in zip(keys, rules):
            rule_status = cache_results.get(key)
            if not rule_status:
                missing_rule_ids.add(rule.id)
            else:
                rule_statuses[rule.id] = rule_status

        if missing_rule_ids:
            # If not cached, attempt to fetch status from the database
            statuses = GroupRuleStatus.objects.filter(
                group=self.group, rule_id__in=missing_rule_ids)
            to_cache: List[GroupRuleStatus] = []
            for status in statuses:
                rule_statuses[status.rule_id] = status
                missing_rule_ids.remove(status.rule_id)
                to_cache.append(status)

            # We might need to create some statuses if they don't already exist
            if missing_rule_ids:
                # We use `ignore_conflicts=True` here to avoid race conditions where the statuses
                # might be created between when we queried above and attempt to create the rows now.
                GroupRuleStatus.objects.bulk_create(
                    [
                        GroupRuleStatus(rule_id=rule_id,
                                        group=self.group,
                                        project=self.project)
                        for rule_id in missing_rule_ids
                    ],
                    ignore_conflicts=True,
                )
                # Using `ignore_conflicts=True` prevents the pk from being set on the model
                # instances. Re-query the database to fetch the rows, they should all exist at this
                # point.
                statuses = GroupRuleStatus.objects.filter(
                    group=self.group, rule_id__in=missing_rule_ids)
                for status in statuses:
                    rule_statuses[status.rule_id] = status
                    missing_rule_ids.remove(status.rule_id)
                    to_cache.append(status)

                if missing_rule_ids:
                    # Shouldn't happen, but log just in case
                    self.logger.error(
                        "Failed to fetch some GroupRuleStatuses in RuleProcessor",
                        extra={
                            "missing_rule_ids": missing_rule_ids,
                            "group_id": self.group.id
                        },
                    )
            if to_cache:
                cache.set_many({
                    self._build_rule_status_cache_key(item.rule_id): item
                    for item in to_cache
                })

        return rule_statuses
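The cache key builder used above is referenced but never shown. Purely as a labelled assumption, a minimal sketch of such a helper only needs to produce a deterministic key per group/rule pair; the real implementation may use a different prefix or hashing scheme.

    # Hypothetical sketch of the key builder assumed by bulk_get_rule_status.
    def _build_rule_status_cache_key(self, rule_id: int) -> str:
        return "grouprulestatus:%s:%s" % (self.group.id, rule_id)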
Esempio n. 51
0
    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        cache.set_many({'key1': 'spam', 'key2': 'eggs'}, 1)
        time.sleep(2)
        self.assertIsNone(cache.get('key1'))
        self.assertIsNone(cache.get('key2'))

    def test_set_many_expiration(self):
        # set_many takes a second ``timeout`` parameter
        cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
        time.sleep(2)
        self.assertEqual(cache.get("key1"), None)
        self.assertEqual(cache.get("key2"), None)

    def test_set_many(self):
        # Multiple keys can be set using set_many
        cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(cache.get("key1"), "spam")
        self.assertEqual(cache.get("key2"), "eggs")
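On Django 2.0 and later, set_many also returns a list of any keys that could not be inserted; a companion assertion in the same style (a sketch, not part of the original suite) would be:

    def test_set_many_returns_failing_keys(self):
        # set_many returns the keys that failed to be stored (Django 2.0+);
        # an empty list means every key was written.
        failing_keys = cache.set_many({"key1": "spam", "key2": "eggs"})
        self.assertEqual(failing_keys, [])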
Esempio n. 54
0
    def test_cache_versioning_get_set_many(self):
        # set, using default version = 1
        cache.set_many({'ford1': 37, 'arthur1': 42})
        assert (
            cache.get_many(['ford1', 'arthur1']) ==
            {'ford1': 37, 'arthur1': 42}
        )
        assert (
            cache.get_many(['ford1', 'arthur1'], version=1) ==
            {'ford1': 37, 'arthur1': 42}
        )
        assert cache.get_many(['ford1', 'arthur1'], version=2) == {}

        assert caches['v2'].get_many(['ford1', 'arthur1']) == {}
        assert (
            caches['v2'].get_many(['ford1', 'arthur1'], version=1) ==
            {'ford1': 37, 'arthur1': 42}
        )
        assert caches['v2'].get_many(['ford1', 'arthur1'], version=2) == {}

        # set, default version = 1, but manually override version = 2
        cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
        assert cache.get_many(['ford2', 'arthur2']) == {}
        assert cache.get_many(['ford2', 'arthur2'], version=1) == {}
        assert (
            cache.get_many(['ford2', 'arthur2'], version=2) ==
            {'ford2': 37, 'arthur2': 42}
        )

        assert (
            caches['v2'].get_many(['ford2', 'arthur2']) ==
            {'ford2': 37, 'arthur2': 42}
        )
        assert caches['v2'].get_many(['ford2', 'arthur2'], version=1) == {}
        assert (
            caches['v2'].get_many(['ford2', 'arthur2'], version=2) ==
            {'ford2': 37, 'arthur2': 42}
        )

        # v2 set, using default version = 2
        caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
        assert cache.get_many(['ford3', 'arthur3']) == {}
        assert cache.get_many(['ford3', 'arthur3'], version=1) == {}
        assert (
            cache.get_many(['ford3', 'arthur3'], version=2) ==
            {'ford3': 37, 'arthur3': 42}
        )

        assert (
            caches['v2'].get_many(['ford3', 'arthur3']) ==
            {'ford3': 37, 'arthur3': 42}
        )
        assert (
            caches['v2'].get_many(['ford3', 'arthur3'], version=1) ==
            {}
        )
        assert (
            caches['v2'].get_many(['ford3', 'arthur3'], version=2) ==
            {'ford3': 37, 'arthur3': 42}
        )

        # v2 set, default version = 2, but manually override version = 1
        caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
        assert (
            cache.get_many(['ford4', 'arthur4']) ==
            {'ford4': 37, 'arthur4': 42}
        )
        assert (
            cache.get_many(['ford4', 'arthur4'], version=1) ==
            {'ford4': 37, 'arthur4': 42}
        )
        assert cache.get_many(['ford4', 'arthur4'], version=2) == {}

        assert caches['v2'].get_many(['ford4', 'arthur4']) == {}
        assert (
            caches['v2'].get_many(['ford4', 'arthur4'], version=1) ==
            {'ford4': 37, 'arthur4': 42}
        )
        assert caches['v2'].get_many(['ford4', 'arthur4'], version=2) == {}
Esempio n. 55
0
    def iterator(self):
        cache_key = self.model._generate_cache_key("QUERY:%s" %
                                                   self._get_query_hash())
        on_cache_query_attr = self.model.value_to_list_on_cache_query()

        to_return = None
        to_cache = {}

        with_aggregates = len(self.query.aggregates) > 0
        key_list = self._fetch_from_query_cache(cache_key)

        if key_list is None:
            if not with_aggregates:
                values_list = [on_cache_query_attr]

                if len(self.query.extra):
                    values_list += self.query.extra.keys()

                key_list = [v[0] for v in self.values_list(*values_list)]
                to_cache[cache_key] = (datetime.datetime.now(), key_list)
            else:
                to_return = list(super(CachedQuerySet, self).iterator())
                to_cache[cache_key] = (datetime.datetime.now(), [
                    (row.__dict__[on_cache_query_attr],
                     dict([(k, row.__dict__[k])
                           for k in self.query.aggregates.keys()]))
                    for row in to_return
                ])
        elif with_aggregates:
            tmp = key_list
            key_list = [k[0] for k in tmp]
            with_aggregates = [k[1] for k in tmp]
            del tmp

        if (not to_return) and key_list:
            row_keys = [
                self.model.infer_cache_key({on_cache_query_attr: attr})
                for attr in key_list
            ]
            cached = cache.get_many(row_keys)

            to_return = [(ck in cached) and self.obj_from_datadict(cached[ck])
                         or ToFetch(force_unicode(key_list[i]))
                         for i, ck in enumerate(row_keys)]

            if len(cached) != len(row_keys):
                to_fetch = [
                    unicode(tr) for tr in to_return if isinstance(tr, ToFetch)
                ]

                fetched = dict([
                    (force_unicode(r.__dict__[on_cache_query_attr]), r)
                    for r in models.query.QuerySet(self.model).filter(
                        **{"%s__in" % on_cache_query_attr: to_fetch})
                ])

                to_return = [(isinstance(tr, ToFetch) and fetched[unicode(tr)]
                              or tr) for tr in to_return]
                to_cache.update(
                    dict([(self.model.infer_cache_key(
                        {on_cache_query_attr: attr}), r._as_dict())
                          for attr, r in fetched.items()]))

            if with_aggregates:
                for i, r in enumerate(to_return):
                    r.__dict__.update(with_aggregates[i])

        if len(to_cache):
            cache.set_many(to_cache, 60 * 60)

        if to_return:
            for row in to_return:
                if hasattr(row, 'leaf'):
                    row = row.leaf

                row.reset_original_state()
                yield row
Esempio n. 56
0
# 1) Import the cache helpers
from django.core.cache import cache  # "cache" is the configured default cache
from django.core.cache import caches  # "caches" holds every configured cache alias
from django.views.decorators.cache import never_cache, cache_page, cache_control

def_cache = caches['default']
rds_cache = caches['redis']

assert def_cache == cache
assert rds_cache != cache
print('def_cache', def_cache)
print('rds_cache', rds_cache)
# 2) Set values; a timeout of 0 (or a negative number) deletes the key immediately
# cache.set('key', 'value', timeout=1000)
cache.set('key', 'value')
print(cache.set_many({'a': 1, 'b': 2, 'c': 3}))
print(cache.get_many(['a', 'b', 'c']))
# 3) Get, increment/decrement and clear values
cache.get('key')
cache.set('num', 1)
cache.incr('num')
cache.incr('num', 10)
cache.decr('num')
cache.decr('num', 5)
cache.clear()


# 4) Per-view cache decorators (stacked here only to show their signatures;
# in practice pick the one you need)
@cache_page(60 * 15, cache="redis")  # an explicit cache alias can be chosen
@cache_page(60 * 15, key_prefix="site1")
@cache_control(max_age=3600)
@never_cache
def demo_view(request):  # placeholder view so the decorators apply to something
    ...
Esempio n. 57
0
    def handle(self, *args, **options):
        if waffle.switch_is_active('populate-multitenant-programs'):
            failure = False
            logger.info('populate-multitenant-programs switch is ON')

            catalog_integration = CatalogIntegration.current()
            username = catalog_integration.service_username

            try:
                user = User.objects.get(username=username)
            except User.DoesNotExist:
                logger.error(
                    'Failed to create API client. Service user {username} does not exist.'.format(username=username)
                )
                raise

            programs = {}
            for site in Site.objects.all():
                site_config = getattr(site, 'configuration', None)
                if site_config is None or not site_config.get_value('COURSE_CATALOG_API_URL'):
                    logger.info('Skipping site {domain}. No configuration.'.format(domain=site.domain))
                    cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), [], None)
                    continue

                client = create_catalog_api_client(user, site=site)
                uuids, program_uuids_failed = self.get_site_program_uuids(client, site)
                new_programs, program_details_failed = self.fetch_program_details(client, uuids)

                if program_uuids_failed or program_details_failed:
                    failure = True

                programs.update(new_programs)

                logger.info('Caching UUIDs for {total} programs for site {site_name}.'.format(
                    total=len(uuids),
                    site_name=site.domain,
                ))
                cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), uuids, None)

            successful = len(programs)
            logger.info('Caching details for {successful} programs.'.format(successful=successful))
            cache.set_many(programs, None)

            if failure:
                # This will fail a Jenkins job running this command, letting site
                # operators know that there was a problem.
                sys.exit(1)

        else:
            catalog_integration = CatalogIntegration.current()
            username = catalog_integration.service_username

            try:
                user = User.objects.get(username=username)
                client = create_catalog_api_client(user)
            except User.DoesNotExist:
                logger.error(
                    'Failed to create API client. Service user {username} does not exist.'.format(username=username)
                )
                raise

            try:
                querystring = {
                    'exclude_utm': 1,
                    'status': ('active', 'retired'),
                    'uuids_only': 1,
                }

                logger.info('Requesting program UUIDs.')
                uuids = client.programs.get(**querystring)
            except:  # pylint: disable=bare-except
                logger.error('Failed to retrieve program UUIDs.')
                raise

            total = len(uuids)
            logger.info('Received {total} UUIDs.'.format(total=total))

            programs = {}
            failure = False
            for uuid in uuids:
                try:
                    logger.info('Requesting details for program {uuid}.'.format(uuid=uuid))
                    program = client.programs(uuid).get(exclude_utm=1)

                    cache_key = PROGRAM_CACHE_KEY_TPL.format(uuid=uuid)
                    programs[cache_key] = program
                except:  # pylint: disable=bare-except
                    logger.exception('Failed to retrieve details for program {uuid}.'.format(uuid=uuid))
                    failure = True

                    continue

            successful = len(programs)
            logger.info('Caching details for {successful} programs.'.format(successful=successful))
            cache.set_many(programs, None)

            logger.info('Caching UUIDs for {total} programs.'.format(total=total))
            cache.set(PROGRAM_UUIDS_CACHE_KEY, uuids, None)

            if failure:
                # This will fail a Jenkins job running this command, letting site
                # operators know that there was a problem.
                sys.exit(1)
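A hedged read-side sketch (not part of the command above): the program details cached with set_many can later be fetched back in a single round trip with get_many, reusing the same key template and the UUID list stored under PROGRAM_UUIDS_CACHE_KEY.

uuids = cache.get(PROGRAM_UUIDS_CACHE_KEY, [])
program_details = cache.get_many(
    [PROGRAM_CACHE_KEY_TPL.format(uuid=uuid) for uuid in uuids])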
Esempio n. 58
0
def handler(cache_name,
            function=None,
            page_key="",
            force_update_cache=False,
            cache_duration=DEFAULT_CACHE_DURATION,
            *args,
            **kwargs):
    """Handles caching for results and data.

    Args:
        cache_name: Name of the cache to use.
        function: A function reference that returns some value to be cached. This function must only use **kwargs.
        page_key: The page name or page number to use as a key value.
        force_update_cache: If True, re-run "function" and overwrite whatever is currently cached.
        cache_duration: How long, in seconds, to keep the result in the cache.
        **kwargs: Any parameters that need to be passed into "function".
    """

    cached_results = None
    # Looks through cache and will perform a search if needed.
    try:
        log.handler(
            cache_name + " - Accessed.",
            log.DEBUG,
            __logger,
        )

        # If "function" is actually a dict, use get_many and/or set_many.
        # All items must belong to the same cache.
        # { page_key: {
        #               "function": function_value,
        #               "kwargs": {kwargs_value},
        #               "args": [args_values],
        #             },
        # ... }
        if isinstance(function, dict):
            if len(function) == 0:
                # Nothing was passed in
                return None
            else:
                # Obtain all the keys from the passed in dictionary
                requested_keys = []
                for key, value in function.items():
                    cache_key = generate_cache_key(cache_name, value["args"],
                                                   value["kwargs"], key)
                    log.handler(
                        cache_name +
                        " - Multi-execution generated cache key " + cache_key,
                        log.DEBUG,
                        __logger,
                    )
                    requested_keys.append(cache_key)

                # Search cache for all keys
                cached_results = cache.get_many(requested_keys)
                log.handler(
                    cache_name + " - Multi-execution detected " +
                    str(len(cached_results)) + " available keys.",
                    log.INFO,
                    __logger,
                )

                # If nothing was in cache, or cache was expired, run function()
                thread_list = []
                for cache_key in requested_keys:
                    if cache_key not in cached_results:
                        key = obtain_key_from_cache_key(cache_key)
                        thread = ReturnThread(
                            target=function[key]["function"],
                            args=function[key]["args"],
                            kwargs=function[key]["kwargs"],
                        )
                        thread.start()
                        thread_list.append((cache_key, thread))

                missing_keys = {}
                for key, thread in thread_list:
                    missing_keys[key] = thread.join()

                # Set values in cache for any newly executed functions
                if bool(missing_keys):
                    log.handler(
                        cache_name + " - Multi-execution detected " +
                        str(len(missing_keys)) + " missing keys.",
                        log.INFO,
                        __logger,
                    )
                    cache.set_many(missing_keys, cache_duration)

                # Return all results
                cached_results.update(missing_keys)

                # If results were none, log it.
                if cached_results is None:
                    log.handler(
                        cache_name +
                        " - Multi-execution generated no results!",
                        log.WARNING,
                        __logger,
                    )

                return cached_results

        # Get the cached value
        cache_key = generate_cache_key(cache_name, args, kwargs, page_key)

        log.handler(
            cache_name + " - Generated cache key " + cache_key,
            log.DEBUG,
            __logger,
        )
        cached_results = cache.get(cache_key)

        # No function was provided, just return bare cache value
        if function is None:
            log.handler(
                cache_name + " - Requested raw cache values.",
                log.DEBUG,
                __logger,
            )
            return cached_results

        # If the user wants to force update the cache, nothing
        # was in cache, or cache was expired, run function()
        if cached_results is None or force_update_cache:
            function_results = function(*args, **kwargs)
            log.handler(
                cache_name + " - Function " + function.__name__ +
                " has been executed!",
                log.INFO,
                __logger,
            )
            cache.set(cache_key, function_results, cache_duration)
            return function_results

        if cached_results is None:
            log.handler(
                cache_name + " - No cached results found!",
                log.INFO,
                __logger,
            )

        # If a value was in cache and not expired, return that value
        return cached_results

    except:
        # If the function threw an exception, return none.
        if isinstance(function, dict):
            log.handler(
                "Function list failed to execute!",
                log.ERROR,
                __logger,
            )
        else:
            log.handler(
                "Function " + function.__name__ + " failed to execute!",
                log.ERROR,
                __logger,
            )

        return None
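A hedged usage sketch of the multi-execution path documented above; the cache name, the fetch_page callable and the page keys are illustrative assumptions, and only the dictionary shape comes from handler's own comments.

# fetch_page stands in for any callable that returns a value worth caching;
# each top-level key doubles as the page_key for that entry.
results = handler(
    "search_results",
    function={
        1: {"function": fetch_page, "args": [], "kwargs": {"query": "dune", "page": 1}},
        2: {"function": fetch_page, "args": [], "kwargs": {"query": "dune", "page": 2}},
    },
)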
Esempio n. 59
0
    def diff(self, request, new_repoid, old_repoid):
        start = datetime.datetime.now()

        is_popup = request.GET.get('is_popup', False)
        do_regen = request.GET.get('do_regen', False)
        progress_id = request.GET.get('progress_id', None)

        if not request.is_ajax() and request.method != 'POST':
            progress_id = uuid.uuid4()
            context = {
                "title": "",
                "opts": self.model._meta,
                "app_label": self.model._meta.app_label,
                "progress_id": progress_id,
                "do_regen": do_regen,
            }
            return TemplateResponse(request,
                                    'diff.html',
                                    context=context,
                                    current_app=self.admin_site.name)

        progress_cb = lambda x: None
        if progress_id is not None:
            progress_cb = lambda x: cache.set(progress_id, x, 60 * 5)

        progress_cb("Initializing repositories")
        new_repo = Repo.objects.select_related(
            "server",
            "platform").prefetch_related("projects", "components",
                                         "containers").get(pk=new_repoid)
        old_repo = Repo.objects.select_related(
            "server",
            "platform").prefetch_related("projects", "components",
                                         "containers").get(pk=old_repoid)
        live_diff = (new_repo.is_live, old_repo.is_live)

        end = datetime.datetime.now() - start
        context = {
            "title": "",
            "opts": self.model._meta,
            "app_label": self.model._meta.app_label,
            'is_popup': is_popup,
            'new_obj': new_repo,
            'old_obj': old_repo,
            'live_diff': live_diff,
            'processing_time': end.total_seconds(),
        }

        if request.method == 'POST':
            progress_cb("Creating request")
            if not (live_diff[0] and live_diff[1]):
                raise ValidationError("Can only creq on live repos")
            submit = request.POST.getlist('submit')
            delete = request.POST.getlist('delete')
            comment = "Request by %s from repodiff of %s to %s" % (
                request.user, new_repo, old_repo)
            creq_msg = request.POST.get('creq_msg')
            if creq_msg:
                comment = "%s\n%s" % (comment, creq_msg)
            mesgs, errors = _creq(new_repo, old_repo, submit, delete, comment)
            for msg in mesgs:
                messages.info(request, msg, extra_tags="safe")
            for err in errors:
                messages.error(request, err, extra_tags="safe")
            progress_cb("Done")

            return TemplateResponse(request,
                                    'diff_noprogress.html',
                                    context=context,
                                    current_app=self.admin_site.name)

        progress_cb("Generating repository diff")
        cachekey = "%s%s%s" % ("repodiff", new_repoid, old_repoid)
        cached = cache.get_many([cachekey, cachekey + 'ts'])
        diff = cached.get(cachekey)
        diffts = cached.get(cachekey + 'ts')

        if diff is None or do_regen:
            diff = _diff_sacks(new_repo, old_repo, progress_cb)
            diffts = datetime.datetime.now()
            cachelife = (60 * 3) if (live_diff[0] or live_diff[1]) else (60 * 60 * 24)
            cache.set_many({
                cachekey: diff,
                cachekey + 'ts': diffts
            }, cachelife)

        filter_repos = set(request.GET.getlist("repo", None))
        filter_meta = _get_filter_meta(request.GET)
        diff = _sort_filter_diff(diff, repos=filter_repos, meta=filter_meta)

        trace_reqs = _get_trace(old_repo, new_repo)
        issue_ref = []
        names = []
        for i in new_repo.platform.issuetracker_set.all():
            if i.name not in names:
                issue_ref.append({'name': i.name, 're': i.re, 'url': i.url})
                names.append(i.name)

        full_path = "%s?" % request.path
        for query, values in request.GET.lists():
            # Keep a trailing "&" after each parameter so successive parameters
            # (and anything appended in the template) stay separated.
            full_path += "&".join(['%s=%s' % (query, val) for val in values])
            full_path += "&"

        end = datetime.datetime.now() - start
        context.update({
            'title': "Comparing repositories",
            'packagemetatypes': list(PackageMetaType.objects.all()),
            'diff': diff,
            'diffts': diffts,
            'trace': trace_reqs,
            'issue_ref': json.dumps(issue_ref),
            'full_path': full_path,
            'processing_time': end.total_seconds()
        })

        progress_cb("Done")
        return TemplateResponse(request,
                                "diff_content.html",
                                context=context,
                                current_app=self.admin_site.name)
    def handle(self, *args, **options):
        failure = False
        logger.info('populate-multitenant-programs switch is ON')

        catalog_integration = CatalogIntegration.current()
        username = catalog_integration.service_username

        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            logger.error(
                'Failed to create API client. Service user {username} does not exist.'.format(username=username)
            )
            raise

        programs = {}
        pathways = {}
        for site in Site.objects.all():
            site_config = getattr(site, 'configuration', None)
            if site_config is None or not site_config.get_value('COURSE_CATALOG_API_URL'):
                logger.info('Skipping site {domain}. No configuration.'.format(domain=site.domain))
                cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), [], None)
                cache.set(SITE_PATHWAY_IDS_CACHE_KEY_TPL.format(domain=site.domain), [], None)
                continue

            client = create_catalog_api_client(user, site=site)
            uuids, program_uuids_failed = self.get_site_program_uuids(client, site)
            new_programs, program_details_failed = self.fetch_program_details(client, uuids)
            new_pathways, pathways_failed = self.get_pathways(client, site)
            new_pathways, new_programs, pathway_processing_failed = self.process_pathways(site, new_pathways,
                                                                                          new_programs)

            if program_uuids_failed or program_details_failed or pathways_failed or pathway_processing_failed:
                failure = True

            programs.update(new_programs)
            pathways.update(new_pathways)

            logger.info('Caching UUIDs for {total} programs for site {site_name}.'.format(
                total=len(uuids),
                site_name=site.domain,
            ))
            cache.set(SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=site.domain), uuids, None)

            pathway_ids = new_pathways.keys()
            logger.info('Caching ids for {total} pathways for site {site_name}.'.format(
                total=len(pathway_ids),
                site_name=site.domain,
            ))
            cache.set(SITE_PATHWAY_IDS_CACHE_KEY_TPL.format(domain=site.domain), pathway_ids, None)

        successful_programs = len(programs)
        logger.info('Caching details for {successful_programs} programs.'.format(
            successful_programs=successful_programs))
        cache.set_many(programs, None)

        successful_pathways = len(pathways)
        logger.info('Caching details for {successful_pathways} pathways.'.format(
            successful_pathways=successful_pathways))
        cache.set_many(pathways, None)

        if failure:
            # This will fail a Jenkins job running this command, letting site
            # operators know that there was a problem.
            sys.exit(1)
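The management commands above pass None as the set_many timeout. In Django's cache API that means the entries are kept until they are explicitly evicted, while a timeout of 0 expires a key immediately; a two-line sketch of the difference (the key name is a placeholder):

cache.set_many({'programs:example': {}}, None)  # cached with no expiry
cache.set_many({'programs:example': {}}, 0)     # expires immediately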