def _make_list(self, key, items, callback=None):
    yield gen.Task(self.client.delete, key)
    for i in items:
        yield gen.Task(self.client.rpush, key, i)
    if callback:
        callback(True)
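The helper above is written in callback style: the caller never passes `callback` itself, gen.Task supplies it and resumes the waiting generator when `callback(True)` fires. A minimal sketch of driving it from a test coroutine (the @gen.engine decoration on both the helper and the caller is an assumption for illustration; test_sort below uses the helper the same way):

from tornado import gen

@gen.engine
def example(self):
    # gen.Task supplies callback=...; iterating '123' pushes '1', '2', '3'.
    yield gen.Task(self._make_list, 'mylist', '123')
    items = yield gen.Task(self.client.lrange, 'mylist', 0, -1)
    self.assertEqual(items, ['1', '2', '3'])
    self.stop()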
def test_msetnx(self):
    res = yield gen.Task(self.client.msetnx, {'a': 1, 'b': 2})
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.msetnx, {'b': 3, 'c': 4})
    self.assertEqual(res, False)
    self.stop()
def test_ping(self):
    res = yield gen.Task(self.client.ping)
    self.assertEqual(res, True)
    self.stop()
def f():
    yield gen.Task(self.io_loop.add_callback)
    1 / 0
def test_setget_unicode(self):
    res = yield gen.Task(self.client.set, 'foo', u'бар')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.get, 'foo')
    self.assertEqual(res, 'бар')
    self.stop()
def f():
    try:
        yield gen.Task(fail_task)
        raise Exception("did not get expected exception")
    except ZeroDivisionError:
        self.stop()
def test_task_transfer_stack_context(self):
    yield gen.Task(self.function_with_stack_context)
    self.assertEqual(self.named_contexts, [])
def _ExecuteAll(self, operation_id=None):
    """Tries to acquire the operation lock. If it is acquired, queries for each operation
    owned by the user and executes each in turn.
    """
    self._requery = False

    results = yield gen.Task(Lock.TryAcquire, self._client, LockResourceType.Operation,
                             str(self._user_id), resource_data=operation_id,
                             detect_abandonment=True)
    self._lock, status = results.args

    if status == Lock.FAILED_TO_ACQUIRE_LOCK:
        # Another server has the lock, so can't wait synchronously for the operations to complete.
        # TODO(Andy): We could poll the operations table if we want to support this.
        for operation_id in self._sync_cb_map.keys():
            self._InvokeSyncCallbacks(operation_id, CannotWaitError,
                                      'Cannot wait for the operation to complete, because another server '
                                      'owns the operation lock.')
        return

    try:
        next_ops = None
        if status == Lock.ACQUIRED_ABANDONED_LOCK and self._lock.resource_data is not None:
            # Execute the operation stored in lock.resource_data if it still exists. It is important
            # to continue with whatever operation was currently running when the abandon occurred.
            # This is because that operation may have only been partly complete.
            op = yield gen.Task(Operation.Query, self._client, self._user_id, self._lock.resource_data,
                                col_names=None, must_exist=False, consistent_read=True)
            next_ops = [op]

        last_op_id = None
        while True:
            if next_ops is None:
                # Get 10 ops at a time, looking for one that is not in quarantine.
                # Use consistent reads, in order to avoid reading already deleted operations. We've
                # seen cases where an op runs, then deletes itself, but then an inconsistent read
                # gets an old version that hasn't yet been deleted and re-runs it.
                next_ops = yield gen.Task(Operation.RangeQuery, self._client, self._user_id,
                                          range_desc=None, limit=10, col_names=None,
                                          excl_start_key=last_op_id, consistent_read=True)
                if len(next_ops) == 0:
                    # No more operations to process.
                    break

            for op in next_ops:
                # Run the op if it is not in quarantine or if it's no longer in backoff.
                if not op.quarantine or not op.IsBackedOff():
                    yield self._ExecuteOp(op)

                    # Look for next op to run; always run earliest op possible.
                    last_op_id = None
                    break
                else:
                    # Skip quarantined operation.
                    logging.info('queried quarantined operation "%s", user %d backed off for %.2fs; skipping...' %
                                 (op.operation_id, op.user_id, op.backoff - time.time()))
                    last_op_id = op.operation_id

            next_ops = None
    finally:
        # Release the operation lock.
        yield gen.Task(self._lock.Release, self._client)
        if self._lock.acquire_failures is not None:
            # Another caller tried to acquire the lock, so there may be more operations available.
            logging.info('other servers tried to acquire lock "%s"; there may be more operations pending' %
                         self._lock)
            self._requery = True
def _ExecuteOp(self, op):
    """Executes the operation by marshalling the JSON-encoded op data as arguments to the
    operation method. The execution of the operation is wrapped in an execution scope, which
    will capture all logging during the execution of this operation.
    """
    # If necessary, wait until back-off has expired before execution begins.
    if op.backoff is not None:
        yield gen.Task(IOLoop.current().add_timeout, op.backoff)

    # Enter execution scope for this operation, so that it can be accessed in OpContext, and so that op-specific
    # logging will be started.
    with OpContext.current().Enter(op):
        op_entry = self._op_map[op.method]
        op_args = json.loads(op.json)

        # If not already done, update the lock to remember the id of the op that is being run. In
        # case of server failure, the server that takes over this lock will know where to start.
        if self._lock.resource_data != op.operation_id:
            self._lock.resource_data = op.operation_id
            yield gen.Task(self._lock.Update, self._client)

        # Migrate the arguments to the current server message version, as the format in the operations
        # table may be out-dated. Remove the headers object from the message, since it's not an
        # expected argument to the method.
        op_message = message.Message(op_args)
        yield gen.Task(op_message.Migrate, self._client,
                       migrate_version=message.MAX_MESSAGE_VERSION,
                       migrators=op_entry.migrators)

        try:
            del op_args['headers']

            # Scrub the op args for logging in order to minimize personal information in the logs.
            scrubbed_op_args = op_args
            if op_entry.scrubber is not None:
                scrubbed_op_args = deepcopy(op_args)
                op_entry.scrubber(scrubbed_op_args)
            args_str = pprint.pformat(scrubbed_op_args)

            logging.info('EXECUTE: user: %d, device: %d, op: %s, method: %s:%s%s' %
                         (op.user_id, op.device_id, op.operation_id, op.method,
                          ('\n' if args_str.find('\n') != -1 else ' '), args_str))
            _ops_per_min.increment()
            if op.attempts > 0:
                _retries_per_min.increment()

            # Starting operation from beginning, so reset modified db state in the
            # OpMgrDBClient wrapper so we'll know if any modifications happened before an abort.
            self._client.ResetDBModified()

            # Actually execute the operation by invoking its handler method.
            results = yield gen.Task(op_entry.handler, self._client, **op_args)

            # Invokes synchronous callback if applicable.
            elapsed_secs = time.time() - op.timestamp
            logging.info('SUCCESS: user: %d, device: %d, op: %s, method: %s in %.3fs%s' %
                         (op.user_id, op.device_id, op.operation_id, op.method, elapsed_secs,
                          (': %s' % pprint.pformat(results) if results else '')))
            _avg_op_time.add(elapsed_secs)

            # Notify any waiting for op to finish that it's now complete.
            self._InvokeSyncCallbacks(op.operation_id)

            # Delete the op, now that it's been successfully executed.
            yield self._DeleteOp(op)
        except StopOperationError:
            # Stop the current operation in order to run a nested operation.
            pass
        except FailpointError:
            # Retry immediately if the operation is retried due to a failpoint.
            type, value, tb = sys.exc_info()
            logging.warning('restarting op due to failpoint: %s (%d)', value.filename, value.lineno)
        except Exception:
            type, value, tb = sys.exc_info()

            # Notify any waiting for op to finish that it failed (don't even wait for retries).
            self._InvokeSyncCallbacks(op.operation_id, type, value, tb)

            # Check for abortable exceptions, but only on 1st attempt.
            if op.attempts == 0 and issubclass(type, _ABORTABLE_EXCEPTIONS):
                yield self._AbortOp(op, type, value, tb)
            else:
                initial_backoff = UserOpManager._INITIAL_BACKOFF_SECS
                if issubclass(type, _SMALLER_RETRY_EXCEPTIONS):
                    initial_backoff = UserOpManager._SMALL_INITIAL_BACKOFF_SECS
                yield self._FailOp(op, type, value, tb, initial_backoff_secs=initial_backoff)
def call_task():
    # Note that there are no parens on call_back.
    # This will be translated by Task into
    # call_back('Hi', callback=callback)
    yield gen.Task(call_back, 'Hi')
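The comment above describes the translation gen.Task performs for any callback-style function. A self-contained sketch of that convention, assuming a pre-6.0 Tornado where gen.engine and gen.Task are available (all names here are hypothetical):

from tornado import gen, ioloop

def greet(name, callback=None):
    # Callback-style API: the result is delivered to `callback` rather than returned.
    ioloop.IOLoop.current().add_callback(callback, 'Hi %s' % name)

@gen.engine
def call_task():
    # Equivalent to greet('Bob', callback=<resume this generator>); the value
    # passed to the callback becomes the result of the yield expression.
    greeting = yield gen.Task(greet, 'Bob')
    print(greeting)  # Hi Bob
    ioloop.IOLoop.current().stop()

if __name__ == '__main__':
    ioloop.IOLoop.current().add_callback(call_task)
    ioloop.IOLoop.current().start()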
def post(self):
    email = self.get_argument('s_email', '')
    profile_picture_id = self.get_argument('picture', None)
    country = self.get_argument('s_country', '')
    name = self.get_argument('s_name', '')
    site = self.get_argument('s_site', '')

    if country in countries.values():
        exists_country = True
    elif country == "Unknown":
        exists_country = True
    else:
        exists_country = False

    if not email:
        response = {'error': True, 'msg': 'Please enter your email address.'}
    elif not name:
        response = {'error': True, 'msg': 'Please enter your name.'}
    elif not country:
        response = {'error': True, 'msg': 'Please select your country.'}
    elif not exists_country:
        response = {'error': True, 'msg': 'Please select your country from the list of countries.'}
    elif not EMAIL_REGEX.match(email):
        response = {'error': True, 'msg': 'The email address is invalid.'}
    else:
        update_user = self.get_current_user()
        user = yield motor.Op(self.db.users.find_one,
                              {"_id": ObjectId(update_user)},
                              {"_id": 0, "email": 1})
        update = {"name": name}
        confirm_key = ''.join(random.choice(string.ascii_uppercase + string.digits)
                              for x in range(32))

        if user["email"] != email:
            update["waiting_email"] = email
            update["confirm_key"] = confirm_key

            new_email = EmailWrapper()
            new_email.set_subject("Confirm your new email address at SoundSlash.com")
            new_email.set_sender("*****@*****.**")
            new_email.add_recipient(email)
            new_email.set_body("email/change_email.html",
                               args={"name": name, "key": confirm_key, "user_id": update_user})
            yield gen.Task(self.unblock, function=new_email.send, parameters={})

        if profile_picture_id is not None:
            update["picture"] = profile_picture_id
            yield motor.Op(self.db.images.update,
                           {"_id": ObjectId(profile_picture_id)},
                           {"$set": {"tags": "profile"}},
                           upsert=False, multi=False)

        if country is not None:
            update["country"] = country
        if site is not None:
            update["site"] = site

        yield motor.Op(self.db.users.update,
                       {"_id": ObjectId(update_user)},
                       {"$set": update},
                       upsert=False, multi=False)
        response = {'msg': 'Profile information successfully saved.'}

    self.write(response)
    self.finish()
def get_image(self, callback, image_id):
    image_body = yield gen.Task(self._images_storage.fetch_image_by_id, image_id=image_id)
    callback(image_body)
def get(self, callback, image_id):
    image_dict = yield gen.Task(self._meta_data_storage.fetch_meta_data, image_id=image_id)
    callback(image_dict)
def test_sort(self):
    res = yield gen.Task(self.client.sort, 'a')
    self.assertEqual(res, [])

    yield gen.Task(self._make_list, 'a', '3214')
    res = yield gen.Task(self.client.sort, 'a')
    self.assertEqual(res, ['1', '2', '3', '4'])

    res = yield gen.Task(self.client.sort, 'a', start=1, num=2)
    self.assertEqual(res, ['2', '3'])

    res = yield gen.Task(self.client.set, 'score:1', 8)
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'score:2', 3)
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'score:3', 5)
    self.assertEqual(res, True)
    yield gen.Task(self._make_list, 'a_values', '123')
    res = yield gen.Task(self.client.sort, 'a_values', by='score:*')
    self.assertEqual(res, ['2', '3', '1'])

    res = yield gen.Task(self.client.set, 'user:1', 'u1')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'user:2', 'u2')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'user:3', 'u3')
    self.assertEqual(res, True)
    yield gen.Task(self._make_list, 'a', '231')
    res = yield gen.Task(self.client.sort, 'a', get='user:*')
    self.assertEqual(res, ['u1', 'u2', 'u3'])

    yield gen.Task(self._make_list, 'a', '231')
    res = yield gen.Task(self.client.sort, 'a', desc=True)
    self.assertEqual(res, ['3', '2', '1'])

    yield gen.Task(self._make_list, 'a', 'ecdba')
    res = yield gen.Task(self.client.sort, 'a', alpha=True)
    self.assertEqual(res, ['a', 'b', 'c', 'd', 'e'])

    yield gen.Task(self._make_list, 'a', '231')
    res = yield gen.Task(self.client.sort, 'a', store='sorted_values')
    self.assertEqual(res, 3)
    res = yield gen.Task(self.client.lrange, 'sorted_values', 0, -1)
    self.assertEqual(res, ['1', '2', '3'])

    yield gen.Task(self.client.set, 'user:1:username', 'zeus')
    yield gen.Task(self.client.set, 'user:2:username', 'titan')
    yield gen.Task(self.client.set, 'user:3:username', 'hermes')
    yield gen.Task(self.client.set, 'user:4:username', 'hercules')
    yield gen.Task(self.client.set, 'user:5:username', 'apollo')
    yield gen.Task(self.client.set, 'user:6:username', 'athena')
    yield gen.Task(self.client.set, 'user:7:username', 'hades')
    yield gen.Task(self.client.set, 'user:8:username', 'dionysus')
    yield gen.Task(self.client.set, 'user:1:favorite_drink', 'yuengling')
    yield gen.Task(self.client.set, 'user:2:favorite_drink', 'rum')
    yield gen.Task(self.client.set, 'user:3:favorite_drink', 'vodka')
    yield gen.Task(self.client.set, 'user:4:favorite_drink', 'milk')
    yield gen.Task(self.client.set, 'user:5:favorite_drink', 'pinot noir')
    yield gen.Task(self.client.set, 'user:6:favorite_drink', 'water')
    yield gen.Task(self.client.set, 'user:7:favorite_drink', 'gin')
    yield gen.Task(self.client.set, 'user:8:favorite_drink', 'apple juice')
    yield gen.Task(self._make_list, 'gods', '12345678')
    res = yield gen.Task(self.client.sort, 'gods', start=2, num=4, by='user:*:username',
                         get='user:*:favorite_drink', desc=True, alpha=True, store='sorted')
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.lrange, 'sorted', 0, -1)
    self.assertEqual(res, ['vodka', 'milk', 'gin', 'apple juice'])
    self.stop()
def get(self):
    self.chunks.append('2')
    yield gen.Task(IOLoop.current().add_callback)
    self.chunks.append('3')
    yield gen.Task(IOLoop.current().add_callback)
    self.write(''.join(self.chunks))
def _MaybePut(path, contents, callback):
    if contents:
        yield gen.Task(merged_store.Put, path, contents)
        logging.info('Wrote %d bytes to %s' % (len(contents), path))
    callback()
def prepare(self):
    yield gen.Task(IOLoop.current().add_callback)
    raise HTTPError(403)
def ProcessFiles(merged_store, logs_paths, filenames, callback):
    """Fetch and process each file contained in 'filenames'."""
    def _ProcessOneFile(contents, day_stats, device_entries, trace_entries):
        """Iterate over the contents of a processed file: one entry per line. Increment stats for
        specific entries."""
        buf = cStringIO.StringIO(contents)
        buf.seek(0)
        # Max len is +1 since we include the current line. It allows us to call 'continue' in the middle of the loop.
        context_before = deque(maxlen=options.options.trace_context_num_lines + 1)
        # Traces that still need "after" context.
        pending_traces = []

        def _AddTrace(trace_type, timestamp, module, message):
            # context_before also has the current line, so grab only :-1.
            trace = {'type': trace_type,
                     'timestamp': timestamp,
                     'module': module,
                     'trace': message,
                     'context_before': list(context_before)[:-1],
                     'context_after': []}
            if options.options.trace_context_num_lines == 0:
                trace_entries.append(trace)
            else:
                pending_traces.append(trace)

        def _CheckPendingTraces(line):
            for t in pending_traces:
                t['context_after'].append(line)
            while (pending_traces and
                   len(pending_traces[0]['context_after']) >= options.options.trace_context_num_lines):
                trace_entries.append(pending_traces.pop(0))

        while True:
            line = buf.readline()
            if not line:
                break
            line = line.rstrip('\n')
            # The deque automatically pops elements from the front when maxlen is reached.
            context_before.append(line)
            _CheckPendingTraces(line)

            parsed = logs_util.ParseLogLine(line)
            if not parsed:
                continue
            day, time, module, msg = parsed
            timestamp = logs_util.DayTimeStringsToUTCTimestamp(day, time)

            if options.options.process_traceback and re.search(kTracebackRE, line):
                _AddTrace('traceback', timestamp, module, msg)

            if module.startswith('user_op_manager:') or module.startswith('operation:'):
                # Found op status line.
                if msg.startswith('SUCCESS'):
                    # Success message. eg: SUCCESS: user: xx, device: xx, op: xx, method: xx.yy in xxs
                    parsed = logs_util.ParseSuccessMsg(msg)
                    if not parsed:
                        continue
                    user, device, op, class_name, method_name = parsed
                    method = '%s.%s' % (class_name, method_name)
                    day_stats.ActiveAll(user)
                    if method in ('Follower.UpdateOperation', 'UpdateFollowerOperation.Execute'):
                        day_stats.ActiveView(user)
                    elif method in ('Comment.PostOperation', 'PostCommentOperation.Execute'):
                        day_stats.ActivePost(user)
                    elif method in ('Episode.ShareExistingOperation', 'Episode.ShareNewOperation',
                                    'ShareExistingOperation.Execute', 'ShareNewOperation.Execute'):
                        day_stats.ActiveShare(user)
                elif msg.startswith('EXECUTE'):
                    # Exec message. eg: EXECUTE: user: xx, device: xx, op: xx, method: xx.yy: <req>
                    parsed = logs_util.ParseExecuteMsg(msg)
                    if not parsed:
                        continue
                    user, device, op, class_name, method_name, request = parsed
                    method = '%s.%s' % (class_name, method_name)
                    if method in ('Device.UpdateOperation', 'User.RegisterOperation',
                                  'RegisterUserOperation.Execute'):
                        try:
                            req_dict = eval(request)
                            device_entries.append({'method': method, 'timestamp': timestamp,
                                                   'request': req_dict})
                        except Exception as e:
                            continue
                elif msg.startswith('ABORT'):
                    if options.options.process_op_abort:
                        # Abort message, save the entire line as well as context.
                        _AddTrace('abort', timestamp, module, msg)
                # FAILURE status is already handled by Traceback processing.
            elif module.startswith('base:') and msg.startswith('/ping OK:'):
                # Ping message. Extract full request dict.
                req_str = logs_util.ParsePingMsg(msg)
                if not req_str:
                    continue
                try:
                    req_dict = json.loads(req_str)
                    device_entries.append({'method': 'ping', 'timestamp': timestamp, 'request': req_dict})
                except Exception as e:
                    continue
            elif module.startswith('ping:') and msg.startswith('ping OK:'):
                # Ping message in new format. Extract full request and response dicts.
                (req_str, resp_str) = logs_util.ParseNewPingMsg(msg)
                if not req_str or not resp_str:
                    continue
                try:
                    req_dict = json.loads(req_str)
                    resp_dict = json.loads(resp_str)
                    device_entries.append({'method': 'ping', 'timestamp': timestamp,
                                           'request': req_dict, 'response': resp_dict})
                except Exception as e:
                    continue

        # No more context. Flush the pending traces into the list.
        trace_entries.extend(pending_traces)
        buf.close()

    today = util.NowUTCToISO8601()
    # Group filenames by day.
    files_by_day = defaultdict(list)
    for filename in filenames:
        day = logs_paths.MergedLogPathToDate(filename)
        if not day:
            logging.error('filename cannot be parsed as processed log: %s' % filename)
            continue
        if options.options.compute_today or today != day:
            files_by_day[day].append(filename)

    # Sort the list of days. This is important both for --max_days_to_process, and to know the last
    # day for which we wrote the file.
    day_list = sorted(files_by_day.keys())
    if options.options.max_days_to_process is not None:
        day_list = day_list[:options.options.max_days_to_process]

    last_day_written = None
    for day in day_list:
        files = files_by_day[day]
        day_stats = logs_util.DayUserRequestStats(day)
        device_entries = []
        trace_entries = []
        for f in files:
            # Let exceptions surface.
            contents = yield gen.Task(merged_store.Get, f)
            logging.info('Processing %d bytes from %s' % (len(contents), f))
            _ProcessOneFile(contents, day_stats, device_entries, trace_entries)

        if not options.options.dry_run:
            # Write the json-ified stats.
            req_contents = json.dumps(day_stats.ToDotDict())
            req_file_path = 'processed_data/user_requests/%s' % day
            dev_contents = json.dumps(device_entries)
            dev_file_path = 'processed_data/device_details/%s' % day
            try:
                trace_contents = json.dumps(trace_entries)
            except Exception as e:
                trace_contents = None
            trace_file_path = 'processed_data/traces/%s' % day

            @gen.engine
            def _MaybePut(path, contents, callback):
                if contents:
                    yield gen.Task(merged_store.Put, path, contents)
                    logging.info('Wrote %d bytes to %s' % (len(contents), path))
                callback()

            yield [gen.Task(_MaybePut, req_file_path, req_contents),
                   gen.Task(_MaybePut, dev_file_path, dev_contents),
                   gen.Task(_MaybePut, trace_file_path, trace_contents)]

            last_day_written = day_stats.day

    callback(last_day_written)
    return
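The final yield in ProcessFiles hands gen.engine a list of gen.Task objects: the three _MaybePut calls are started together and the generator resumes only after every one of them has called back. A stripped-down sketch of that parallel-yield pattern (store and payloads are hypothetical stand-ins, not part of the code above):

from tornado import gen

@gen.engine
def write_all(store, payloads, callback):
    # Yielding a list of Tasks starts them all and waits for each to complete.
    yield [gen.Task(store.Put, path, contents)
           for path, contents in payloads.items() if contents]
    callback()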
def inner(callback):
    yield gen.Task(self.io_loop.add_callback)
    1 / 0
def outer():
    for i in range(10):
        yield gen.Task(inner)
    stack_increase = len(stack_context._state.contexts) - initial_stack_depth
    self.assertTrue(stack_increase <= 2)
    self.stop()
def f():
    yield gen.Task(self.io_loop.add_callback)
    raise gen.Return(42)
def tornado_coroutine():
    yield gen.Task(self.io_loop.add_callback)
    raise gen.Return(42)
def f():
    yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 1)
def get(self):
    io_loop = self.request.connection.stream.io_loop
    client = AsyncHTTPClient(io_loop=io_loop)
    response = yield gen.Task(client.fetch, self.get_argument('url'))
    response.rethrow()
    self.finish(b"got response: " + response.body)
def test_set(self):
    res = yield gen.Task(self.client.set, 'foo', 'bar')
    self.assertEqual(res, True)
    self.stop()
def get(self):
    # This test depends on the order of the two decorators.
    io_loop = self.request.connection.stream.io_loop
    yield gen.Task(io_loop.add_callback)
    raise Exception("oops")
def test_hash(self):
    res = yield gen.Task(self.client.hmset, 'foo', {'a': 1, 'b': 2})
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.hgetall, 'foo')
    self.assertEqual(res, {'a': '1', 'b': '2'})
    res = yield gen.Task(self.client.hdel, 'foo', 'a')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.hgetall, 'foo')
    self.assertEqual(res, {'b': '2'})
    res = yield gen.Task(self.client.hget, 'foo', 'a')
    self.assertEqual(res, '')
    res = yield gen.Task(self.client.hget, 'foo', 'b')
    self.assertEqual(res, '2')
    res = yield gen.Task(self.client.hlen, 'foo')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.hincrby, 'foo', 'b', 3)
    self.assertEqual(res, 5)
    res = yield gen.Task(self.client.hkeys, 'foo')
    self.assertEqual(res, ['b'])
    res = yield gen.Task(self.client.hvals, 'foo')
    self.assertEqual(res, ['5'])
    res = yield gen.Task(self.client.hset, 'foo', 'a', 1)
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.hmget, 'foo', ['a', 'b'])
    self.assertEqual(res, {'a': '1', 'b': '5'})
    res = yield gen.Task(self.client.hexists, 'foo', 'b')
    self.assertEqual(res, True)
    self.stop()
def prepare(self):
    self.chunks = []
    yield gen.Task(IOLoop.current().add_callback)
    self.chunks.append('1')
def test_sets(self):
    res = yield gen.Task(self.client.smembers, 'foo')
    self.assertEqual(res, set())
    res = yield gen.Task(self.client.sadd, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.srandmember, 'foo')
    self.assertIn(res, ['a', 'b', 'c'])
    res = yield gen.Task(self.client.scard, 'foo')
    self.assertEqual(res, 3)
    res = yield gen.Task(self.client.srem, 'foo', 'a')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.smove, 'foo', 'bar', 'b')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.smembers, 'bar')
    self.assertEqual(res, set(['b']))
    res = yield gen.Task(self.client.sismember, 'foo', 'c')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.spop, 'foo')
    self.assertEqual(res, 'c')
    self.stop()
def test_zsets(self):
    res = yield gen.Task(self.client.zadd, 'foo', 1, 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 2.15, 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zscore, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zscore, 'foo', 'b')
    self.assertEqual(res, 2.15)
    res = yield gen.Task(self.client.zrank, 'foo', 'a')
    self.assertEqual(res, 0)
    res = yield gen.Task(self.client.zrank, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrevrank, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrevrank, 'foo', 'b')
    self.assertEqual(res, 0)
    res = yield gen.Task(self.client.zincrby, 'foo', 'a', 1)
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zincrby, 'foo', 'b', 1)
    self.assertEqual(res, 3.15)
    res = yield gen.Task(self.client.zscore, 'foo', 'a')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zscore, 'foo', 'b')
    self.assertEqual(res, 3.15)
    res = yield gen.Task(self.client.zrange, 'foo', 0, -1, withscores=True)
    self.assertEqual(res, [('a', 2.0), ('b', 3.15)])
    res = yield gen.Task(self.client.zrange, 'foo', 0, -1, False)
    self.assertEqual(res, ['a', 'b'])
    res = yield gen.Task(self.client.zrevrange, 'foo', 0, -1, True)
    self.assertEqual(res, [('b', 3.15), ('a', 2.0)])
    res = yield gen.Task(self.client.zrevrange, 'foo', 0, -1, False)
    self.assertEqual(res, ['b', 'a'])
    res = yield gen.Task(self.client.zcard, 'foo')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zadd, 'foo', 3.5, 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '+inf', None, None, False)
    self.assertEqual(res, ['a', 'b', 'c'])
    res = yield gen.Task(self.client.zrevrangebyscore, 'foo', '+inf', '-inf', None, None, False)
    self.assertEqual(res, ['c', 'b', 'a'])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '2.1', '+inf', None, None, True)
    self.assertEqual(res, [('b', 3.15), ('c', 3.5)])
    res = yield gen.Task(self.client.zrevrangebyscore, 'foo', '+inf', '2.1', None, None, True)
    self.assertEqual(res, [('c', 3.5), ('b', 3.15)])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '3.0', 0, 1, False)
    self.assertEqual(res, ['a'])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '+inf', 1, 2, False)
    self.assertEqual(res, ['b', 'c'])
    res = yield gen.Task(self.client.zrevrangebyscore, 'foo', '+inf', '-inf', 1, 2, False)
    self.assertEqual(res, ['b', 'a'])
    res = yield gen.Task(self.client.delete, 'foo')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.zadd, 'foo', 1, 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 2, 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 3, 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 4, 'd')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zremrangebyrank, 'foo', 2, 4)
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zremrangebyscore, 'foo', 0, 2)
    self.assertEqual(res, 2)

    res = yield gen.Task(self.client.zadd, 'a', 1, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'a', 1, 'a2')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'a', 1, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a4')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 6, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 5, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 4, 'a4')
    self.assertEqual(res, 1)

    # ZINTERSTORE
    # sum, no weight
    res = yield gen.Task(self.client.zinterstore, 'z', ['a', 'b', 'c'])
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(res, [('a3', 8), ('a1', 9)])
    # max, no weight
    res = yield gen.Task(self.client.zinterstore, 'z', ['a', 'b', 'c'], aggregate='MAX')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(res, [('a3', 5), ('a1', 6)])
    # with weight
    res = yield gen.Task(self.client.zinterstore, 'z', {'a': 1, 'b': 2, 'c': 3})
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(res, [('a3', 20), ('a1', 23)])

    # ZUNIONSTORE
    # sum, no weight
    res = yield gen.Task(self.client.zunionstore, 'z', ['a', 'b', 'c'])
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(dict(res), dict(a1=9, a2=1, a3=8, a4=6))
    # max, no weight
    res = yield gen.Task(self.client.zunionstore, 'z', ['a', 'b', 'c'], aggregate='MAX')
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(dict(res), dict(a1=6, a2=1, a3=5, a4=4))
    # with weight
    res = yield gen.Task(self.client.zunionstore, 'z', {'a': 1, 'b': 2, 'c': 3})
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, withscores=True)
    self.assertEqual(dict(res), dict(a1=23, a2=1, a3=20, a4=16))
    self.stop()