def collectioncompany(request):
    try:
        companyid = request.POST.get('companyid')
        company = Company.objects.get(pk=companyid)
        user = request.user
    except Company.DoesNotExist:
        raise Http404("Company does not exist")

    collection = CollectionCompany.objects.filter(company=company, user=user)
    cachekey = "company_collection_" + str(companyid)
    if collection:
        # Already favourited: remove the favourite and decrement the cached count.
        collection.delete()
        collecicon = '收藏'  # "favourite" (label shown when not yet favourited)
        collection_icon = 'glyphicon-star-empty'
        if cache.get(cachekey) is not None:
            cache.decr(cachekey)
        else:
            cache.set(cachekey, company.collectioncompany_set.count(), 1209600)
    else:
        # Not favourited yet: create the favourite and increment the cached count.
        c = CollectionCompany(user=user, company=company)
        c.save()
        collecicon = '已收藏'  # "favourited"
        collection_icon = 'glyphicon-star'
        if cache.get(cachekey) is not None:
            cache.incr(cachekey)
        else:
            cache.set(cachekey, company.collectioncompany_set.count(), 1209600)

    data = {
        'collecicon': collecicon,
        'collection_icon': collection_icon,
        'collectioncount': cache.get(cachekey),
    }
    json_data = json.dumps(data)
    return HttpResponse(json_data, content_type='application/json')
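# The get-then-incr/decr pattern above recurs in several views below. A hedged
# helper sketch that factors it out; the function name and timeout default are
# illustrative, not from the original code. Note that get() followed by
# incr()/decr() is not atomic, so concurrent requests can make the counter
# drift until the key expires and is reseeded from the database.
def adjust_cached_count(cachekey, delta, fallback_count, timeout=1209600):
    if cache.get(cachekey) is not None:
        if delta >= 0:
            cache.incr(cachekey, delta)
        else:
            cache.decr(cachekey, -delta)
    else:
        # Cache miss: seed from the authoritative DB count instead of adjusting.
        cache.set(cachekey, fallback_count, timeout)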
@contextmanager  # implied by the docstring; requires `from contextlib import contextmanager`
def acquire_lock(lock_name):
    """
    A contextmanager to wait until an exclusive lock is available, hold the
    lock and then release it when the code under context is complete.

    TODO: This code doesn't work like it should. It doesn't wait indefinitely
    for the lock and in fact cycles through very quickly.
    """
    for _ in range(10):
        try:
            value = cache.incr(lock_name)
        except ValueError:
            # Key does not exist yet; initialise it and try again.
            cache.set(lock_name, 0)
            value = cache.incr(lock_name)
        if value == 1:
            # We were the first to increment, so we hold the lock.
            break
        else:
            # Someone else holds the lock; undo our increment and retry.
            cache.decr(lock_name)
    else:
        # Never acquired the lock after 10 tries: run anyway, then reset the key.
        yield
        cache.set(lock_name, 0)
        return
    yield
    cache.decr(lock_name)
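# As the TODO above notes, the incr/decr dance races and gives up after ten
# quick spins. A minimal alternative sketch built on cache.add(), which only
# stores the key if it is absent and is atomic on the memcached and Redis
# backends; the function name, timeout and poll_interval parameters are
# assumptions, not part of the original code.
import time
from contextlib import contextmanager

from django.core.cache import cache


@contextmanager
def acquire_lock_sketch(lock_name, timeout=60, poll_interval=0.1):
    deadline = time.monotonic() + timeout
    # Whoever add()s first holds the lock; everyone else polls until the
    # holder deletes the key, the key expires, or the deadline passes.
    while not cache.add(lock_name, 1, timeout):
        if time.monotonic() > deadline:
            raise TimeoutError('could not acquire %s' % lock_name)
        time.sleep(poll_interval)
    try:
        yield
    finally:
        cache.delete(lock_name)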
def insertdb(name, email, school):
    # Insert a registration into the database.
    # Check whether the email already exists in the email list.
    email_item = EmailList.objects.filter(email=email)
    if not email_item:
        # Insert into the email list.
        email_id = EmailList.objects.create(email=email).id
    else:
        email_id = email_item[0].id
    # Check whether the user already exists in user info.
    if UserInfo.objects.filter(emailid=email_id):
        raise UserAlreadyRegisteredError('')
    # Insert into user info.
    UserInfo.objects.create(name=name, emailid=email_id, school=school)
    # Generate security email address and insert into check list.
    email_qr = email + '_' + str(random.randint(10, 99))
    # Check whether there are enough tickets left. Note: get() followed by
    # decr() is not atomic, so two concurrent registrations can both pass.
    print('tickets left', cache.get('tickets'))
    if cache.get('tickets') <= 0:
        raise TicketSoldOutError('')
    cache.decr('tickets')
    return email, email_qr, email_id
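# A hedged alternative sketch for the ticket check above, for a Redis-backed
# cache where decr() is atomic and may go negative (memcached clamps at 0, so
# the < 0 test would not fire there); the function name is illustrative.
def reserve_ticket_sketch():
    remaining = cache.decr('tickets')
    if remaining < 0:
        cache.incr('tickets')  # give the oversold ticket back
        raise TicketSoldOutError('')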
def decrement_counter_flagged(sender, flag, comment, **kwargs):
    if flag.flag == flag.MODERATOR_DELETION and getattr(comment, "is_public", True):
        key = get_cache_key_from_comment(comment)
        try:
            cache.decr(key)
        except ValueError:
            pass
def collectioninvestment(request):
    try:
        investmentid = request.POST.get('investmentid')
        investment = Investment.objects.get(pk=investmentid)
        user = request.user
    except Investment.DoesNotExist:  # was `investment.DoesNotExist`; the instance is unbound when the lookup fails
        raise Http404("Investment does not exist")

    collection = CollectionInvestment.objects.filter(investment=investment, user=user)
    cachekey = "investment_collection_" + str(investmentid)
    if collection:
        # Already favourited: remove it and decrement the cached count.
        collection.delete()
        collecicon = '收藏'  # "favourite"
        collection_icon = 'glyphicon-star-empty'
        if cache.get(cachekey) is not None:
            cache.decr(cachekey)
        else:
            cache.set(cachekey, investment.collectioninvestment_set.count(), 1209600)
    else:
        # Not favourited yet: create it and increment the cached count.
        c = CollectionInvestment(user=user, investment=investment)
        c.save()
        collecicon = '已收藏'  # "favourited"
        collection_icon = 'glyphicon-star'
        if cache.get(cachekey) is not None:
            cache.incr(cachekey)
        else:
            cache.set(cachekey, investment.collectioninvestment_set.count(), 1209600)

    data = {
        'collecicon': collecicon,
        'collection_icon': collection_icon,
        'collectioncount': cache.get(cachekey),
    }
    json_data = json.dumps(data)
    return HttpResponse(json_data, content_type='application/json')
def decrement_counter_post_save(sender, instance, **kwargs):
    if getattr(instance, "is_public", True) and not getattr(instance, "is_removed", False):
        key = get_cache_key_from_comment(instance)
        try:
            cache.decr(key)
        except ValueError:
            pass
def test_decr(self):
    cache.set('calc', 43)
    self.assertEqual(cache.decr('calc'), 42)
    self.assertEqual(cache.get('calc'), 42)
    self.assertEqual(cache.decr('calc', 10), 32)
    self.assertEqual(cache.get('calc'), 32)
    self.assertEqual(cache.decr('calc', -10), 42)
    with self.assertRaises(ValueError):
        cache.decr('does_not_exist')
def test_decr(self):
    # Cache values can be decremented
    cache.set('answer', 43)
    self.assertEqual(cache.decr('answer'), 42)
    self.assertEqual(cache.get('answer'), 42)
    self.assertEqual(cache.decr('answer', 10), 32)
    self.assertEqual(cache.get('answer'), 32)
    self.assertEqual(cache.decr('answer', -10), 42)
    self.assertRaises(ValueError, cache.decr, 'does_not_exist')
def test_decr(self): # Cache values can be decremented cache.set("answer", 43) self.assertEqual(cache.decr("answer"), 42) self.assertEqual(cache.get("answer"), 42) self.assertEqual(cache.decr("answer", 10), 32) self.assertEqual(cache.get("answer"), 32) self.assertEqual(cache.decr("answer", -10), 42) self.assertRaises(ValueError, cache.decr, "does_not_exist")
def test_incr_decr_timeout(self):
    """incr/decr does not modify expiry time (matches memcached behavior)"""
    key = "value"
    _key = cache.make_key(key)
    cache.set(key, 1, timeout=cache.default_timeout * 10)
    expire = cache._expire_info[_key]
    cache.incr(key)
    self.assertEqual(expire, cache._expire_info[_key])
    cache.decr(key)
    self.assertEqual(expire, cache._expire_info[_key])
def test_decr(self):
    # Cache values can be decremented
    cache.set('answer', 43)
    assert cache.decr('answer') == 42
    assert cache.get('answer') == 42
    assert cache.decr('answer', 10) == 32
    assert cache.get('answer') == 32
    assert cache.decr('answer', -10) == 42
    with pytest.raises(ValueError):
        cache.decr('does_not_exist')
def setread(self):
    self.read = True
    instancesave.delay(self)
    cachekey = "user_unread_count" + str(self.recipient.id)
    if cache.get(cachekey) is not None:
        cache.decr(cachekey)
        return ''
    else:
        unread = Notification.objects.filter(recipient=self.recipient).filter(read=False).count()
        cache.set(cachekey, unread, settings.CACHE_EXPIRETIME)
        return ''
def subtopicount(sender, **kwargs):
    topic = kwargs.pop("instance")
    group = Group.objects.get(id=topic.group.id)
    group.topicount -= 1
    group.save()
    cachekey = "group_topic_count_" + str(group.id)
    if cache.get(cachekey) is not None:
        cache.decr(cachekey)
    else:
        # Reload to pick up the saved count before seeding the cache.
        group = Group.objects.get(id=topic.group.id)
        cache.set(cachekey, group.topicount)
def revert_rate_limit(scope, request):
    """Revert rate limit to previous state.

    This can be used when rate limiting POST, but ignoring some events.
    """
    key = get_cache_key(scope, request)
    try:
        # Try to decrease cache key
        cache.decr(key)
    except ValueError:
        pass
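# A hedged usage sketch of the pattern the docstring describes: a POST is
# counted against the limit up front, then the attempt is handed back when the
# event turns out to be one that should not count. check_rate_limit and the
# 'login' scope are illustrative names, not confirmed API.
def login_view(request):
    if not check_rate_limit('login', request):  # hypothetical limiter
        return HttpResponse('Too many attempts', status=429)
    form = AuthenticationForm(request, data=request.POST)
    if form.is_valid():
        # A successful login should not count against the limit.
        revert_rate_limit('login', request)
        ...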
def validateDo(request, base64encoded):
    ip = getClientIP(request)
    jsonString = b64decode(base64encoded).decode()
    data = json.loads(jsonString)
    password = genPassword(16)
    field = False
    try:
        isValid(data)
    except Exception as e:
        return HttpResponseRedirect("error/{0}".format(e))
    reqTimesLeft = cache.get("reqTimesLeft_{0}".format(ip))
    if reqTimesLeft is None:
        cache.add("reqTimesLeft_{0}".format(ip), 5, 900)
    elif reqTimesLeft > 0:
        cache.decr("reqTimesLeft_{0}".format(ip))
    else:
        return HttpResponseRedirect("error/{0}".format(50))
    from selenium import webdriver
    browser = webdriver.PhantomJS()
    for server in [randint(1, 9) for i in range(3)]:
        try:
            url = "http://forum{0}.hkgolden.com/ProfilePage.aspx?userid={1}".format(server, data["hkg_uid"])
            browser.get(url)
            elem = browser.find_element_by_xpath("//*")
            html = elem.get_attribute("outerHTML")
            field = pq(html)("#ctl00_ContentPlaceHolder1_tc_Profile_tb0_lb_website").html()
            break
        except:
            pass
    browser.close()
    if not field and field != "":
        return HttpResponseRedirect("error/{0}".format(100))  # Down server
    elif field != base64encoded:
        return HttpResponseRedirect("error/{0}".format(101))  # Wrong string
    try:
        conn = MinecraftJsonApi(host='localhost', port=6510,
                                username='******', password='******')
        conn.call("players.name.whitelist", data["mc_name"])
        conn.call("server.run_command", "authme register {0} {1}".format(data["mc_name"], password))
    except:
        return HttpResponseRedirect("error/{0}".format(102))  # Failed to communicate with server
    else:
        newUser = Whitelist.objects.create(ip=ip, time=time(), mc_name=data["mc_name"],
                                           hkg_uid=data["hkg_uid"], init_password=password)
        newUser.save()
        payload = {"password": password, "mc_name": data["mc_name"]}
        jsonString = json.dumps(payload)
        base64encoded = b64encode(jsonString.encode()).decode()
        return HttpResponseRedirect("../success/{0}".format(base64encoded))
def acquire(self, value: int = 1) -> bool:
    """
    Decrement semaphore value.

    If the semaphore is locked, or the value parameter is greater than the
    current semaphore value, raise ValueError indicating that it cannot be
    acquired.

    :param value: Number of values to acquire.
    :return: True if semaphore acquired.
    """
    current_value = self.value()
    if current_value < 1 or current_value < value:
        raise ValueError('Semaphore cannot be acquired')
    cache.decr(self._cache_key, value)
    return True
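# For symmetry, a minimal release() counterpart sketch, assuming the same
# _cache_key attribute as acquire() above; the method name and docstring are
# assumptions, not part of the original class. Note also that the
# value()-then-decr() sequence in acquire() is not atomic, so two callers can
# both pass the check under contention.
def release(self, value: int = 1) -> bool:
    """Increment the semaphore by `value`, undoing a prior acquire()."""
    # incr() raises ValueError if the key is missing, mirroring how
    # acquire() relies on the key having been initialised elsewhere.
    cache.incr(self._cache_key, value)
    return True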
def leaveRoom(request):
    '''
    A method to decrease the number of users in the chatroom and remove the
    user from the list when the user leaves the chatroom.
    '''
    if 'roomId' not in request.POST or not request.POST['roomId']:
        return redirect(reverse(chatIndex))
    roomId = request.POST['roomId']
    user = request.user
    print('leave ' + roomId)
    global room_key, user_prefix
    cache.decr(room_key % roomId)
    cache.delete(user_prefix % roomId + user.username)
    return HttpResponse('')
def commentdislike(request):
    try:
        commentid = request.POST.get('commentid')
        comment = Comment.objects.get(pk=commentid)
        user = request.user
    except Comment.DoesNotExist:  # was `Article.DoesNotExist`, a copy-paste error
        raise Http404("Comment does not exist")
    try:
        commentdislike = CommentDisLike.objects.get(comment=comment, user=user)
    except CommentDisLike.DoesNotExist:  # was a bare `except:`, which hides real errors
        commentdislike = None
    if commentdislike:
        commentdislikecount = -1
        # commentdislike.delete()
        instancedelete.delay(commentdislike)
        # Decrement the cached dislike count for this comment.
        cachekey = "comment_dislike_count_" + str(comment.id)
        if cache.get(cachekey) is not None:
            cache.decr(cachekey)
        else:
            # The delete above is asynchronous, so the DB count still includes
            # this dislike; seed the cache from it, then decrement.
            cache.set(cachekey, comment.commentdislike_set.count())
            cache.decr(cachekey)
        # comment.readers = comment.readers - 1
        # comment.save()
        readersout.delay(comment)
    else:
        commentdislikecount = 1
        # Increment the cached dislike count for this comment.
        cachekey = "comment_dislike_count_" + str(comment.id)
        if cache.get(cachekey) is not None:
            cache.incr(cachekey)
        else:
            # The save below is asynchronous, so the DB count does not yet
            # include this dislike; seed the cache from it, then increment.
            cache.set(cachekey, comment.commentdislike_set.count())
            cache.incr(cachekey)
        # comment.readers = comment.readers + 1
        # comment.save()
        readersin.delay(comment)
        c = CommentDisLike(user=user, comment=comment)
        # c.save()
        instancesave.delay(c)
    data = {
        'commentdislikecount': commentdislikecount,
    }
    json_data = json.dumps(data)
    # print 'commentlike'
    return HttpResponse(json_data, content_type='application/json')
def update_posts_like(request, post_id):
    error_messages = {
        1: "Sorry, you have run out of points. "
           "You could get points with great posts.",
        2: "You already liked this post before.",
        3: "Something wrong with Redis server.",
        4: "Something wrong with calculating points.",
    }
    post_author_id = request.POST.get("post_author_id", "")
    post_likes_users = get_redis_connection("default")
    post_object = Post.objects.get(id=post_id)
    act_id = post_object.act_id
    user_id = str(request.user.id)
    user_points = cache.get("user_points_" + user_id)
    if user_points < 1:
        return HttpResponse(error_messages[1], status=500)
    # If the user already liked the post, bail out.
    user_like = post_likes_users.zscore(
        "post_" + str(post_id), "user" + ":" + str(request.user.id)
    )
    if user_like:
        return HttpResponse(error_messages[2], status=500)
    # Record the like on both the post and its activity.
    post_likes_users.zadd(
        ("post_" + str(post_id)), time.time(), "user" + ":" + str(request.user.id)
    )
    post_likes_users.zadd(
        ("act_" + str(act_id)), time.time(), "post" + ":" + str(post_id)
    )
    # Update user points: spend one from the liker, award one to the author.
    cache.decr("user_points_" + user_id)
    cache.incr("user_points_" + str(post_author_id))
    return HttpResponse(status=201)
def delete(request, lets_id):
    """Delete a Lets object, only if the logged-in user is the creator."""
    lets = get_object_or_404(Lets, pk=lets_id)
    if lets.creator != request.user:
        raise PermissionDenied
    lets.delete()
    # Coins
    profile = request.user.profile
    user_key = Lets.u_cache_key(request.user)
    lets_key = cache.get(user_key)
    if lets_key:
        if lets_key > 0:
            # No 250 -
            cache.decr(user_key)
        if lets_key <= 0:
            # Yes 250 -
            profile.update(coins=profile.coins - ucs.ACTIONS_COINS['lets']['daily'])
            cache.delete(user_key)
    else:
        # yes 250 -
        profile.update(coins=profile.coins - ucs.ACTIONS_COINS['lets']['daily'])
    update_progress(profile, action='lets')
    update_rewards(request.user, 'lets')
    process_mojo_action_down(request.user, 'lets')
    # TODO: return progress_data with ajax request. Modify js on lets side to
    # update profile completion widget
    next_url = reverse('lets.wall', args=[request.user.username])
    if request.is_ajax():
        return JSONResponse({'is_valid': True, 'msg': 'Deleted'})
    return HttpResponseRedirect(next_url)
def collection(request):
    try:
        articleid = request.POST.get('articleid')
        article = Article.objects.get(pk=articleid)
        user = request.user
    except Article.DoesNotExist:
        raise Http404("Article does not exist")
    try:
        collection = Collection.objects.get(article=article, user=user)
    except Collection.DoesNotExist:  # was a bare `except:`, which hides real errors
        collection = None
    if collection:
        collection.delete()
        collcount = -1
        cachekey = "article_collection_" + str(articleid)
        if cache.get(cachekey) is not None:
            cache.decr(cachekey)
        else:
            cache.set(cachekey, article.collection_set.count(), settings.CACHE_EXPIRETIME)
        collecicon = '收藏'  # "favourite"
    else:
        c = Collection(user=user, article=article)
        c.save()
        collcount = 1
        cachekey = "article_collection_" + str(articleid)
        if cache.get(cachekey) is not None:
            cache.incr(cachekey)
        else:
            cache.set(cachekey, article.collection_set.count(), settings.CACHE_EXPIRETIME)
        collecicon = '已收藏'  # "favourited"
    data = {
        'collecicon': collecicon,
        'collcount': collcount,
    }
    json_data = json.dumps(data)
    return HttpResponse(json_data, content_type='application/json')
def test_cache_versioning_incr_decr(self):
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.incr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 38)
    self.assertEqual(cache.get('answer1', version=2), 42)
    cache.decr('answer1')
    self.assertEqual(cache.get('answer1', version=1), 37)
    self.assertEqual(cache.get('answer1', version=2), 42)

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.incr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 43)
    cache.decr('answer2', version=2)
    self.assertEqual(cache.get('answer2', version=1), 37)
    self.assertEqual(cache.get('answer2', version=2), 42)

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].incr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 43)
    caches['v2'].decr('answer3')
    self.assertEqual(cache.get('answer3', version=1), 37)
    self.assertEqual(cache.get('answer3', version=2), 42)

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].incr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 38)
    self.assertEqual(cache.get('answer4', version=2), 42)
    caches['v2'].decr('answer4', version=1)
    self.assertEqual(cache.get('answer4', version=1), 37)
    self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self): cache.set("answer1", 37, version=1) cache.set("answer1", 42, version=2) cache.incr("answer1") self.assertEqual(cache.get("answer1", version=1), 38) self.assertEqual(cache.get("answer1", version=2), 42) cache.decr("answer1") self.assertEqual(cache.get("answer1", version=1), 37) self.assertEqual(cache.get("answer1", version=2), 42) cache.set("answer2", 37, version=1) cache.set("answer2", 42, version=2) cache.incr("answer2", version=2) self.assertEqual(cache.get("answer2", version=1), 37) self.assertEqual(cache.get("answer2", version=2), 43) cache.decr("answer2", version=2) self.assertEqual(cache.get("answer2", version=1), 37) self.assertEqual(cache.get("answer2", version=2), 42) cache.set("answer3", 37, version=1) cache.set("answer3", 42, version=2) caches["v2"].incr("answer3") self.assertEqual(cache.get("answer3", version=1), 37) self.assertEqual(cache.get("answer3", version=2), 43) caches["v2"].decr("answer3") self.assertEqual(cache.get("answer3", version=1), 37) self.assertEqual(cache.get("answer3", version=2), 42) cache.set("answer4", 37, version=1) cache.set("answer4", 42, version=2) caches["v2"].incr("answer4", version=1) self.assertEqual(cache.get("answer4", version=1), 38) self.assertEqual(cache.get("answer4", version=2), 42) caches["v2"].decr("answer4", version=1) self.assertEqual(cache.get("answer4", version=1), 37) self.assertEqual(cache.get("answer4", version=2), 42)
def test_cache_versioning_incr_decr(self):
    cache.set('answer1', 37, version=1)
    cache.set('answer1', 42, version=2)
    cache.incr('answer1')
    assert cache.get('answer1', version=1) == 38
    assert cache.get('answer1', version=2) == 42
    cache.decr('answer1')
    assert cache.get('answer1', version=1) == 37
    assert cache.get('answer1', version=2) == 42

    cache.set('answer2', 37, version=1)
    cache.set('answer2', 42, version=2)
    cache.incr('answer2', version=2)
    assert cache.get('answer2', version=1) == 37
    assert cache.get('answer2', version=2) == 43
    cache.decr('answer2', version=2)
    assert cache.get('answer2', version=1) == 37
    assert cache.get('answer2', version=2) == 42

    cache.set('answer3', 37, version=1)
    cache.set('answer3', 42, version=2)
    caches['v2'].incr('answer3')
    assert cache.get('answer3', version=1) == 37
    assert cache.get('answer3', version=2) == 43
    caches['v2'].decr('answer3')
    assert cache.get('answer3', version=1) == 37
    assert cache.get('answer3', version=2) == 42

    cache.set('answer4', 37, version=1)
    cache.set('answer4', 42, version=2)
    caches['v2'].incr('answer4', version=1)
    assert cache.get('answer4', version=1) == 38
    assert cache.get('answer4', version=2) == 42
    caches['v2'].decr('answer4', version=1)
    assert cache.get('answer4', version=1) == 37
    assert cache.get('answer4', version=2) == 42
def test_decr(self):
    key = 'test decr'
    cache.set(key, 101)
    self.assertEqual(100, cache.decr(key))
    self.assertEqual(50, cache.decr(key, 50))
    self.assertEqual(100, cache.decr(key, -50))
def upload_results_file(pending_upload_id: int):
    pending_upload = PendingUpload.objects.get(id=pending_upload_id)
    pending_upload.status = PendingUpload.STATE_STARTED
    pending_upload.save()

    data_source = pending_upload.data_source
    logger.info(
        "Started to process file [datasource %d, pending upload %d]",
        data_source.id,
        pending_upload_id,
    )
    try:
        logger.info(
            "Checking file format and data [datasource %d, pending upload %d]",
            data_source.id,
            pending_upload_id,
        )
        file_metadata, data = extract_data_from_uploaded_file(
            pending_upload.uploaded_file)

        try:
            cache.incr("celery_workers_updating", ignore_key_check=True)

            with RWLock(
                # several workers can update their records in parallel, just as
                # several threads can read from the same file
                cache.client.get_client(),
                "celery_worker_updating",
                RWLock.READ,
                expire=None,
            ), cache.lock(
                # but only one worker can make updates associated with a
                # specific data source at the same time
                f"celery_worker_lock_db_{data_source.id}"
            ), transaction.atomic(using=router.db_for_write(AchillesResults)):
                logger.info(
                    "Updating results data [datasource %d, pending upload %d]",
                    data_source.id,
                    pending_upload_id,
                )
                pending_upload.uploaded_file.seek(0)
                update_achilles_results_data(logger, pending_upload, file_metadata)

                logger.info(
                    "Creating an upload history record [datasource %d, pending upload %d]",
                    data_source.id,
                    pending_upload_id,
                )
                data_source.release_date = data["source_release_date"]
                data_source.save()

                pending_upload.uploaded_file.seek(0)
                UploadHistory.objects.create(
                    data_source=data_source,
                    r_package_version=data["r_package_version"],
                    generation_date=data["generation_date"],
                    cdm_release_date=data["cdm_release_date"],
                    cdm_version=data["cdm_version"],
                    vocabulary_version=data["vocabulary_version"],
                    uploaded_file=pending_upload.uploaded_file.file,
                    pending_upload_id=pending_upload.id,
                )
                pending_upload.uploaded_file.delete()
                pending_upload.delete()
        finally:
            workers_updating = cache.decr("celery_workers_updating")
    except Exception as e:
        pending_upload.status = PendingUpload.STATE_FAILED
        pending_upload.save()
        raise e

    # The lines below can be used to later update materialized views of each
    # chart. To be more efficient, they should only be updated when there are
    # no more workers inserting records.
    if not workers_updating:
        refresh(logger, data_source.id)
def decr(self, key, delta=1):
    key = self._clean_key(key)
    logger.debug("Decrementing %s by %d" % (key, delta))
    return cache.decr(key, delta)
def decr(self, delta=1):
    '''Decrease the value by delta.'''
    return cache.decr(self.key, delta=delta)
def test_decr_range(self):
    cache.set('underwhelm', BIGINT_SIGNED_MIN + 1)
    cache.decr('underwhelm')
    with pytest.raises(OperationalError):
        cache.decr('underwhelm')
assert def_cache == cache
assert rds_cache != cache
print('def_cache', def_cache)
print('rds_cache', rds_cache)

# 2) Set: if the expiry time (exp) is 0 or negative, the entry is deleted.
# cache.set('key', 'value', exp=1000)
cache.set('key', 'value')
print(cache.set_many({'a': 1, 'b': 2, 'c': 3}))
print(cache.get_many(['a', 'b', 'c']))

# 3) Get.
cache.get('key')
cache.set('num', 1)
cache.incr('num')
cache.incr('num', 10)
cache.decr('num')
cache.decr('num', 5)
cache.clear()


@cache_page(60 * 15, cache="redis")  # a specific cache backend can be selected
@cache_page(60 * 15, key_prefix="site1")
@cache_control(max_age=3600)
@never_cache
def myview(request):
    pass


"""
https://segmentfault.com/q/1010000009705858
https://docs.djangoproject.com/zh-hans/3.0/topics/cache/#cache-key-prefixing
def test_cant_decr_decimals(self):
    # Cached values that aren't ints can't be decremented
    cache.set('answer', Decimal('9.9'))
    with pytest.raises(ValueError):
        cache.decr('answer')
def main():
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("-q", action="store_true", default=False,
                      help="quiet mode", dest="quiet")
    parser.add_option("-d", action="store_true", default=False,
                      help="debug mode", dest="debug")
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error("incorrect number of arguments")
        return 1

    loglevel = "INFO"
    if options.quiet:
        loglevel = "WARNING"
    if options.debug:
        loglevel = "DEBUG"
    logger = logging.getLogger()
    logger.setLevel(logging._levelNames[loglevel])
    fmt = "[MON:%(levelname)s %(asctime)s] %(message)s"
    formatter = logging.Formatter(fmt, "%T")
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    logger.handlers = []
    logger.addHandler(handler)

    cstate = State.objects.get(name="CREATED")
    rstate = State.objects.get(name="RUNNING")
    estate = State.objects.get(name="EXITING")
    dstate = State.objects.get(name="DONE")
    fstate = State.objects.get(name="FAULT")

    ctimeout = 6
    rtimeout = 72
    etimeout = 30
    ftimeout = 48  # 96

    # created state
    deltat = datetime.now(pytz.utc) - timedelta(hours=ctimeout)
    # cjobs = Job.objects.filter(state=cstate, last_modified__lt=deltat)
    cjobs = Job.objects.filter(state=cstate, last_modified__lt=deltat, flag=False)
    logging.info("Stale created: %d" % cjobs.count())

    # running state
    deltat = datetime.now(pytz.utc) - timedelta(hours=rtimeout)
    rjobs = Job.objects.filter(state=rstate, last_modified__lt=deltat, flag=False)
    logging.info("Stale running: %d" % rjobs.count())

    # exiting state
    deltat = datetime.now(pytz.utc) - timedelta(minutes=etimeout)
    ejobs = Job.objects.filter(state=estate, last_modified__lt=deltat)
    logging.info("Stale exiting: %d" % ejobs.count())

    # flagged jobs
    deltat = datetime.now(pytz.utc) - timedelta(hours=ftimeout)
    fjobs = Job.objects.filter(last_modified__lt=deltat, flag=True)
    logging.info("Stale flagged: %d" % fjobs.count())

    skey = {"CREATED": "fcr", "RUNNING": "frn", "EXITING": "fex"}

    ################
    for j in cjobs:
        # flag stale created jobs
        if j.flag:
            continue
        # PAL commented to reduce mon_message table
        # msg = "In CREATED state >%dhrs so flagging the job" % ctimeout
        # m = Message(job=j, msg=msg, client="127.0.0.1")
        # m.save()
        # db.jobs.update({'name': j.id},{ '$push' : { 'msgs' : msg} }, upsert=True)
        # redis here....
        j.flag = True
        j.save()

    for j in rjobs:
        # flag stale running jobs
        if j.flag:
            continue
        # PAL commented to reduce mon_message table
        # msg = "In RUNNING state >%dhrs so flagging the job" % rtimeout
        # m = Message(job=j, msg=msg, client="127.0.0.1")
        # m.save()
        # db.jobs.update({'name': j.id},{ '$push' : { 'msgs' : msg} }, upsert=True)
        j.flag = True
        j.save()

    for j in fjobs:
        # move flagged jobs to FAULT state
        msg = "Job flagged for >%dhrs so setting state to FAULT" % ftimeout
        m = Message(job=j, msg=msg, client="127.0.0.1")
        m.save()
        # db.jobs.update({'name': j.id},{ '$push' : { 'msgs' : msg} }, upsert=True)
        msg = "%s_%s: %s -> FAULT, stale job" % (j.fid.name, j.cid, j.state)
        logging.debug(msg)
        j.state = fstate
        j.save()

    ################
    # prefix = skey[statenow.name]
    # key = "%s%d" % (prefix, j.fid.id)
    # try:
    #     val = cache.decr(key)
    # except ValueError:
    #     # key not known so set to current count
    #     msg = "MISS key: %s" % key
    #     logging.debug(msg)
    #     val = Job.objects.filter(fid=j.fid, state=statenow).count()
    #     added = cache.add(key, val)
    #     if added:
    #         msg = "Added DB count for key %s : %d" % (key, val)
    #         logging.debug(msg)
    #     else:
    #         msg = "Failed to decr key: %s" % key
    #         logging.debug(msg)
    #
    # key = "fft%d" % j.fid.id
    # try:
    #     val = cache.incr(key)
    # except ValueError:
    #     # key not known so set to current count
    #     msg = "MISS key: %s" % key
    #     logging.debug(msg)
    #     val = Job.objects.filter(fid=j.fid, state=fstate).count()
    #     added = cache.add(key, val)
    #     if added:
    #         msg = "Added DB count for key %s : %d" % (key, val)
    #         logging.debug(msg)
    #     else:
    #         msg = "Failed to incr key: %s" % key
    #         logging.debug(msg)

    for j in ejobs:
        # move EXITING jobs to DONE state
        msg = "%s -> DONE" % j.state
        m = Message(job=j, msg=msg, client="127.0.0.1")
        m.save()
        msg = "%s_%s: %s -> DONE" % (j.fid.name, j.cid, j.state)
        logging.debug(msg)
        j.state = dstate
        j.save()

        key = "fex%d" % j.fid.id
        try:
            val = cache.decr(key)
        except ValueError:
            # key not known so set to current count
            msg = "MISS key: %s" % key
            logging.debug(msg)
            val = Job.objects.filter(fid=j.fid, state=estate).count()
            added = cache.add(key, val)
            if added:
                msg = "Added DB count for key %s : %d" % (key, val)
                logging.debug(msg)
            else:
                msg = "Failed to decr key: %s" % key
                logging.debug(msg)

        key = "fdn%d" % j.fid.id
        try:
            val = cache.incr(key)
        except ValueError:
            # key not known so set to current count
            msg = "MISS key: %s" % key
            logging.debug(msg)
            val = Job.objects.filter(fid=j.fid, state=dstate).count()
            added = cache.add(key, val)
            if added:
                msg = "Added DB count for key %s : %d" % (key, val)
                logging.debug(msg)
            else:
                msg = "Failed to incr key: %s, db count: %d" % (key, val)
                logging.debug(msg)