def _testSortBy(self):
    """FT.AGGREGATE SORTBY: single-key desc/asc, MAX capping, and multi-key sorts."""
    # Sum of price per brand, sorted by price descending.
    res = self.cmd('ft.aggregate', 'games', '*',
                   'GROUPBY', '1', '@brand',
                   'REDUCE', 'sum', 1, '@price', 'as', 'price',
                   'SORTBY', 2, '@price', 'desc',
                   'LIMIT', '0', '2')
    self.assertListEqual([long(292), ['brand', '', 'price', '44780.69'], [
        'brand', 'mad catz', 'price', '3973.48']], res)

    # Same aggregation, ascending order.
    res = self.cmd('ft.aggregate', 'games', '*',
                   'GROUPBY', '1', '@brand',
                   'REDUCE', 'sum', 1, '@price', 'as', 'price',
                   'SORTBY', 2, '@price', 'asc',
                   'LIMIT', '0', '2')
    self.assertListEqual([long(292), ['brand', 'myiico', 'price', '0.23'], [
        'brand', 'crystal dynamics', 'price', '0.25']], res)

    # Test MAX with limit higher than it
    res = self.cmd('ft.aggregate', 'games', '*',
                   'GROUPBY', '1', '@brand',
                   'REDUCE', 'sum', 1, '@price', 'as', 'price',
                   'SORTBY', 2, '@price', 'asc', 'MAX', 2,
                   'LIMIT', '0', '10')
    self.assertListEqual([long(292), ['brand', 'myiico', 'price', '0.23'], [
        'brand', 'crystal dynamics', 'price', '0.25']], res)

    # Test Sorting by multiple properties
    res = self.cmd('ft.aggregate', 'games', '*',
                   'GROUPBY', '1', '@brand',
                   'REDUCE', 'sum', 1, '@price', 'as', 'price',
                   'APPLY', '(@price % 10)', 'AS', 'price',
                   'SORTBY', 4, '@price', 'asc', '@brand', 'desc', 'MAX', 10,
                   )
    self.assertListEqual([long(292), ['brand', 'zps', 'price', '0'], ['brand', 'zalman', 'price', '0'], ['brand', 'yoozoo', 'price', '0'], ['brand', 'white label', 'price', '0'], ['brand', 'stinky', 'price', '0'], [
        'brand', 'polaroid', 'price', '0'], ['brand', 'plantronics', 'price', '0'], ['brand', 'ozone', 'price', '0'], ['brand', 'oooo', 'price', '0'], ['brand', 'neon', 'price', '0']], res)
def testSynonymGroupWithThreeSynonyms(self):
    """Searching any member of a three-term synonym group matches the doc indexed with one of them."""
    with self.redis() as r:
        r.flushdb()
        self.assertOk(
            r.execute_command('ft.create', 'idx', 'schema', 'title',
                              'text', 'body', 'text'))
        # First synonym group created gets id 0.
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'boy', 'child',
                              'offspring'), 0)
        self.assertOk(
            r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                              'title', 'he is a boy', 'body',
                              'this is a test'))
        # Both synonyms of 'boy' must retrieve doc1.
        res = r.execute_command('ft.search', 'idx', 'child', 'EXPANDER',
                                'SYNONYM')
        self.assertEqual(res, [
            long(1), 'doc1',
            ['title', 'he is a boy', 'body', 'this is a test']
        ])
        res = r.execute_command('ft.search', 'idx', 'offspring', 'EXPANDER',
                                'SYNONYM')
        self.assertEqual(res, [
            long(1), 'doc1',
            ['title', 'he is a boy', 'body', 'this is a test']
        ])
def testSynonymsRdb(self):
    """Synonym group data must survive an RDB save/reload cycle."""
    with self.redis() as r:
        r.flushdb()
        self.assertOk(
            r.execute_command('ft.create', 'idx', 'schema', 'title',
                              'text', 'body', 'text'))
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'boy', 'child',
                              'offspring'), 0)
        # After each reload the dump must still map every term to group 0.
        for _ in self.client.retry_with_rdb_reload():
            self.assertEqual(r.execute_command('ft.syndump', 'idx'), [
                'offspring', [long(0)], 'child', [long(0)], 'boy', [long(0)]
            ])
def testSummarization(self):
    """Exercise SUMMARIZE/HIGHLIGHT options of FT.SEARCH against a large text corpus."""
    # Load the file
    self.setupGenesis()
    res = self.cmd('FT.SEARCH', 'idx', 'abraham isaac jacob',
                   'SUMMARIZE', 'FIELDS', 1, 'txt', 'LEN', 20,
                   'HIGHLIGHT', 'FIELDS', 1, 'txt', 'TAGS', '<b>', '</b>')
    self.assertEqual(1, res[0])
    # print res
    res_txt = res[2][1]
    # print res_txt
    # All three query terms should be wrapped in the highlight tags.
    self.assertTrue("<b>Abraham</b>" in res_txt)
    self.assertTrue("<b>Isaac</b>" in res_txt)
    self.assertTrue("<b>Jacob</b>" in res_txt)

    # Highlight without summarize: the whole (large) field comes back.
    res = self.cmd('FT.SEARCH', 'idx', 'abraham isaac jacob',
                   'HIGHLIGHT', 'fields', 1, 'txt', 'TAGS', '<i>', '</i>')
    res_txt = res[2][1]
    self.assertGreaterEqual(len(res_txt), 160000)

    res = self.cmd('FT.SEARCH', 'idx', 'abraham isaac jacob',
                   'SUMMARIZE', 'FIELDS', 1, 'txt', 'FRAGS', 10000)
    # print res
    res_list = res[2][1]
    #self.assertIsInstance(res_list, list)

    # Search with custom separator
    res = self.cmd('FT.SEARCH', 'idx', 'isaac',
                   'SUMMARIZE', 'FIELDS', 1, 'txt',
                   'SEPARATOR', '\r\n', 'FRAGS', 4, 'LEN', 3)
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertEqual([
        long(1), u'gen1', [
            u'txt',
            u'name Isaac: and\r\nwith Isaac,\r\nIsaac. {21:4} And Abraham circumcised his son Isaac\r\nson Isaac was\r\n'
        ]
    ], res)

    # Attempt a query which doesn't have a corresponding matched term
    res = self.cmd('FT.SEARCH', 'idx', '-blah', 'SUMMARIZE', 'LEN', 3)
    self.assertEqual(long(1), res[0])
    self.assertEqual('gen1', res[1])
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertTrue(
        u'The First Book of Moses, called Genesis {1:1}' in res[2][1])

    # Try the same, but attempting to highlight
    res = self.cmd('FT.SEARCH', 'idx', '-blah', 'HIGHLIGHT')
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertTrue(215000 >= len(res[2][1]) >= 211000)
def testCn(self):
    """Chinese-language indexing, summarization and highlighting; also mixed-language input."""
    text = open(SRCTEXT).read()
    self.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
    self.cmd('ft.add', 'idx', 'doc1', 1.0, 'LANGUAGE', 'CHINESE',
             'FIELDS', 'txt', text)
    res = self.cmd('ft.search', 'idx', '之旅', 'SUMMARIZE', 'HIGHLIGHT',
                   'LANGUAGE', 'chinese')
    res[2] = [safe_unicode(x) for x in res[2]]
    # NOTE(review): exact-payload assertion for the Chinese summary is
    # disabled upstream; only the command round-trip is exercised here.

    res = self.cmd('ft.search', 'idx', 'hacker', 'summarize', 'highlight')
    res[2] = [safe_unicode(x) for x in res[2]]
    # NOTE(review): exact-payload assertion for the 'hacker' summary is
    # likewise disabled upstream.

    # Check that we can tokenize english with friso (sub-optimal, but don't want gibberish)
    gentxt = open(GENTXT).read()
    self.cmd('ft.add', 'idx', 'doc2', 1.0, 'LANGUAGE', 'chinese',
             'FIELDS', 'txt', gentxt)
    res = self.cmd('ft.search', 'idx', 'abraham', 'summarize', 'highlight')
    self.assertEqual(long(1), res[0])
    self.assertEqual('doc2', res[1])
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertTrue(u'<b>Abraham</b>' in res[2][1])

    # Add an empty document. Hope we don't crash!
    self.cmd('ft.add', 'idx', 'doc3', 1.0, 'language', 'chinese',
             'fields', 'txt1', '')

    # Check splitting. TODO - see how to actually test for matches
    self.cmd('ft.search', 'idx', 'redis客户端', 'language', 'chinese')
    self.cmd('ft.search', 'idx', '简介Redisson 是一个高级的分布式协调Redis客户端',
             'language', 'chinese')
def testSummarizationMultiField(self):
    """Summarize two TEXT fields of one document in a single FT.SEARCH."""
    p1 = "Redis is an open-source in-memory database project implementing a networked, in-memory key-value store with optional durability. Redis supports different kinds of abstract data structures, such as strings, lists, maps, sets, sorted sets, hyperloglogs, bitmaps and spatial indexes. The project is mainly developed by Salvatore Sanfilippo and is currently sponsored by Redis Labs.[4] Redis Labs creates and maintains the official Redis Enterprise Pack."
    p2 = "Redis typically holds the whole dataset in memory. Versions up to 2.4 could be configured to use what they refer to as virtual memory[19] in which some of the dataset is stored on disk, but this feature is deprecated. Persistence is now achieved in two different ways: one is called snapshotting, and is a semi-persistent durability mode where the dataset is asynchronously transferred from memory to disk from time to time, written in RDB dump format. Since version 1.1 the safer alternative is AOF, an append-only file (a journal) that is written as operations modifying the dataset in memory are processed. Redis is able to rewrite the append-only file in the background in order to avoid an indefinite growth of the journal."

    self.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt1', 'TEXT', 'txt2', 'TEXT')
    self.cmd('FT.ADD', 'idx', 'redis', 1.0, 'FIELDS', 'txt1', p1, 'txt2', p2)

    # Now perform the multi-field search
    self.cmd('FT.SEARCH', 'idx', 'memory persistence salvatore',
             'HIGHLIGHT', 'TAGS', '<b>', '</b>',
             'SUMMARIZE', 'LEN', 5,
             'RETURN', 2, 'txt1', 'txt2')

    # Now perform the multi-field search
    res = self.cmd('FT.SEARCH', 'idx', 'memory persistence salvatore',
                   'SUMMARIZE', 'FIELDS', 2, 'txt1', 'txt2', 'LEN', 5)

    # print res
    self.assertEqual(long(1), res[0])
    self.assertEqual('redis', res[1])
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertTrue(u'txt1' in res[2])
    # NOTE(review): the trailing '\n' below was reconstructed from a
    # line-mangled source -- confirm against the original test file.
    self.assertTrue(
        u'memory database project implementing a networked, in-memory ... by Salvatore Sanfilippo... \n' in res[2])
    self.assertTrue(u'txt2' in res[2])
    self.assertTrue(
        u'dataset in memory. Versions... as virtual memory[19] in... persistent durability mode where the dataset is asynchronously transferred from memory... ' in res[2])
def testSynonymUpdate(self):
    """ft.synupdate extends an existing group; only docs added afterwards pick it up."""
    with self.redis() as r:
        r.flushdb()
        self.assertOk(
            r.execute_command('ft.create', 'idx', 'schema', 'title',
                              'text', 'body', 'text'))
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'boy', 'child',
                              'offspring'), 0)
        # doc1 is indexed BEFORE 'baby' joins group 0.
        self.assertOk(
            r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                              'title', 'he is a baby', 'body',
                              'this is a test'))
        self.assertOk(r.execute_command('ft.synupdate', 'idx', '0', 'baby'))
        # doc2 is indexed AFTER the update.
        self.assertOk(
            r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                              'title', 'he is another baby', 'body',
                              'another test'))
        res = r.execute_command('ft.search', 'idx', 'child', 'EXPANDER',
                                'SYNONYM')
        # synonyms are applied from the moment they were added; previous
        # docs are not reindexed, so only doc2 matches.
        self.assertEqual(res, [
            long(1), 'doc2',
            ['title', 'he is another baby', 'body', 'another test']
        ])
def testSynonymsIntensiveLoad(self):
    """Stress many synonym groups and verify them all after RDB reloads."""
    iterations = 1000
    with self.redis() as r:
        r.flushdb()
        self.assertOk(
            r.execute_command('ft.create', 'idx', 'schema', 'title',
                              'text', 'body', 'text'))
        # Group ids are sequential, so synadd must return i for group i.
        for i in range(iterations):
            self.assertEqual(
                r.execute_command('ft.synadd', 'idx', 'boy%d' % i,
                                  'child%d' % i, 'offspring%d' % i), i)
        for i in range(iterations):
            self.assertOk(
                r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
                                  'fields', 'title', 'he is a boy%d' % i,
                                  'body', 'this is a test'))
        # Every synonym must still resolve to its doc after reload.
        for _ in self.client.retry_with_rdb_reload():
            for i in range(iterations):
                res = r.execute_command('ft.search', 'idx', 'child%d' % i,
                                        'EXPANDER', 'SYNONYM')
                self.assertEqual(res, [
                    long(1), 'doc%d' % i, [
                        'title', 'he is a boy%d' % i, 'body',
                        'this is a test'
                    ]
                ])
def _testNoGroup(self):
    """FT.AGGREGATE without GROUPBY: LOAD + APPLY + multi-key SORTBY with MAX."""
    res = self.cmd('ft.aggregate', 'games', '*',
                   'LOAD', '2', '@brand', '@price',
                   'APPLY', 'floor(sqrt(@price)) % 10', 'AS', 'price',
                   'SORTBY', 4, '@price', 'desc', '@brand', 'desc',
                   'MAX', 5,
                   )
    self.assertListEqual([long(2265), ['brand', 'Xbox', 'price', '9'], ['brand', 'Turtle Beach', 'price', '9'], [
        'brand', 'Trust', 'price', '9'], ['brand', 'SteelSeries', 'price', '9'], ['brand', 'Speedlink', 'price', '9']], res)
def testSummarizationNoSave(self):
    """Summarizing a NOSAVE document yields None for the field body."""
    self.cmd('FT.CREATE', 'idx', 'SCHEMA', 'body', 'TEXT')
    self.cmd('FT.ADD', 'idx', 'doc', 1.0, 'NOSAVE',
             'fields', 'body', 'hello world')
    result = self.cmd('FT.SEARCH', 'idx', 'hello',
                      'SUMMARIZE', 'RETURN', 1, 'body')
    self.assertEqual([long(1), 'doc', ['body', None]], result)
def _testGroupBy(self):
    """FT.AGGREGATE GROUPBY with a count reducer, sorted by count descending."""
    cmd = ['ft.aggregate', 'games', '*',
           'GROUPBY', '1', '@brand',
           'REDUCE', 'count', '0', 'AS', 'count',
           'SORTBY', 2, '@count', 'desc',
           'LIMIT', '0', '5'
           ]
    res = self.cmd(*cmd)
    self.assertIsNotNone(res)
    self.assertEqual([long(292), ['brand', '', 'count', '1518'], ['brand', 'mad catz', 'count', '43'], [
        'brand', 'generic', 'count', '40'], ['brand', 'steelseries', 'count', '37'], ['brand', 'logitech', 'count', '35']], res)
def read_response(self, command=None, index=0):
    """Read and parse one RESP reply from the buffered connection.

    Extends the standard parser with per-command awareness: for HGETALL,
    bulk strings at odd positions (the hash values) are read through
    ``self._buffer.read_and_decode`` -- presumably deserializing packed
    values (the commented code suggests msgpack); TODO confirm.

    :param command: name of the command whose reply is being parsed
    :param index: position of this element within an enclosing multi-bulk
    :return: decoded value; server error replies are *returned* as
        exception instances (not raised) unless they are ConnectionErrors
    """
    response = self._buffer.readline()
    if not response:
        raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error: %s, %s" %
                              (str(byte), str(response)))

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        error = self.parse_error(response)
        # if the error is a ConnectionError, raise immediately so the user
        # is notified
        if isinstance(error, ConnectionError):
            raise error
        # otherwise, we're dealing with a ResponseError that might belong
        # inside a pipeline response. the connection's read_response()
        # and/or the pipeline's execute() will raise this error if
        # necessary, so just return the exception instance here.
        return error
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        # TODO: IF command=HGETALL and it is value (odd indexed), then decode it
        if command == 'HGETALL' and index % 2 == 1:
            # print('Unpacking through here...')
            # start_time = time.time()
            # unpacker = msgpack.Unpacker(self._buffer, length)
            response = self._buffer.read_and_decode(length)
            # print('Time taken: {}'.format(time.time() - start_time))
        else:
            response = self._buffer.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        # Recurse, propagating the command and each element's index so
        # nested HGETALL values get the special decode above.
        response = [self.read_response(command, i) for i in xrange(length)]

    if isinstance(response, bytes):
        response = self.encoder.decode(response)
    return response
def read_response(self):
    """Read and parse one RESP reply from the buffered connection.

    Returns the decoded value; server error replies are *returned* as
    exception instances (not raised) unless they are ConnectionErrors.
    Returns None when the underlying buffer has already been released
    by a concurrent close (see the guarded readline below).
    """
    try:
        response = self._buffer.readline()
    except Exception:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. The swallow itself is a deliberate best-effort
        # workaround for a race where another thread releases the buffer.
        print("SC Debug: race condition that the _buffer is already released")
        return None
    if not response:
        raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error: %s, %s" %
                              (str(byte), str(response)))

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        error = self.parse_error(response)
        # if the error is a ConnectionError, raise immediately so the user
        # is notified
        if isinstance(error, ConnectionError):
            raise error
        # otherwise, we're dealing with a ResponseError that might belong
        # inside a pipeline response. the connection's read_response()
        # and/or the pipeline's execute() will raise this error if
        # necessary, so just return the exception instance here.
        return error
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self._buffer.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes):
        response = self.encoder.decode(response)
    return response
def _testFirstValue(self):
    """FIRST_VALUE reducer with BY ... ASC/DESC: top/bottom item and price per brand."""
    res = self.cmd('ft.aggregate', 'games',
                   '@brand:(sony|matias|beyerdynamic|(mad catz))',
                   'GROUPBY', 1, '@brand',
                   'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'DESC', 'AS', 'top_item',
                   'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'DESC', 'AS', 'top_price',
                   'REDUCE', 'FIRST_VALUE', 4, '@title', 'BY', '@price', 'ASC', 'AS', 'bottom_item',
                   'REDUCE', 'FIRST_VALUE', 4, '@price', 'BY', '@price', 'ASC', 'AS', 'bottom_price',
                   'SORTBY', 2, '@top_price', 'DESC', 'MAX', 5
                   )
    self.assertListEqual([long(4), ['brand', 'sony', 'top_item', 'sony psp slim &amp; lite 2000 console',
                                    'top_price', '695.8', 'bottom_item',
                                    'sony dlchd20p high speed hdmi cable for playstation 3',
                                    'bottom_price', '5.88'],
                          ['brand', 'matias', 'top_item', 'matias halfkeyboard usb',
                           'top_price', '559.99', 'bottom_item',
                           'matias halfkeyboard usb', 'bottom_price', '559.99'],
                          ['brand', 'beyerdynamic', 'top_item',
                           'beyerdynamic mmx300 pc gaming premium digital headset with microphone',
                           'top_price', '359.74', 'bottom_item',
                           'beyerdynamic headzone pc gaming digital surround sound system with mmx300 digital headset with microphone',
                           'bottom_price', '0'],
                          ['brand', 'mad catz', 'top_item',
                           'mad catz s.t.r.i.k.e.7 gaming keyboard',
                           'top_price', '295.95', 'bottom_item',
                           'madcatz mov4545 xbox replacement breakaway cable',
                           'bottom_price', '3.49']], res)
def testOverflow1(self):
    """Regression test: highlighting a doc with many fields must not overflow."""
    #"FT.CREATE" "netflix" "SCHEMA" "title" "TEXT" "WEIGHT" "1" "rating" "TEXT" "WEIGHT" "1" "level" "TEXT" "WEIGHT" "1" "description" "TEXT" "WEIGHT" "1" "year" "NUMERIC" "uscore" "NUMERIC" "usize" "NUMERIC"
    #FT.ADD" "netflix" "15ad80086ccc7f" "1" "FIELDS" "title" "The Vampire Diaries" "rating" "TV-14" "level" "Parents strongly cautioned. May be unsuitable for children ages 14 and under." "description" "90" "year" "2017" "uscore" "91" "usize" "80"
    # NOTE(review): schema field 'leve' looks like a typo for 'level'
    # (the FT.ADD below uses "level", per the repro above) -- confirm intent.
    self.cmd('FT.CREATE', 'netflix', 'SCHEMA', 'title', 'TEXT', 'rating',
             'TEXT', 'leve', 'TEXT', 'description', 'TEXT', 'year',
             'NUMERIC', 'uscore', 'NUMERIC', 'usize', 'NUMERIC')
    self.cmd(
        'FT.ADD', "netflix", "15ad80086ccc7f", "1.0", "FIELDS",
        "title", "The Vampire Diaries",
        "rating", "TV-14",
        "level",
        "Parents strongly cautioned. May be unsuitable for children ages 14 and under.",
        "description", "90", "year", "2017", "uscore", "91", "usize", "80")
    res = self.cmd('ft.search', 'netflix', 'vampire', 'highlight')
    self.assertTrue(res[0] == long(1))
    self.assertTrue(res[1] == u'15ad80086ccc7f')
    res[2] = [safe_unicode(x) for x in res[2]]
    self.assertTrue(u'The <b>Vampire</b> Diaries' in res[2])
def _testSplit(self):
    """APPLY split(): custom separators, strip chars, defaults, and the empty string."""
    res = self.cmd('ft.aggregate', 'games', '*',
                   'APPLY', 'split("hello world,  foo,,,bar,", ",", " ")', 'AS', 'strs',
                   'APPLY', 'split("hello world,  foo,,,bar,", " ", ",")', 'AS', 'strs2',
                   'APPLY', 'split("hello world,  foo,,,bar,", "", "")', 'AS', 'strs3',
                   'APPLY', 'split("hello world,  foo,,,bar,")', 'AS', 'strs4',
                   'APPLY', 'split("hello world,  foo,,,bar,",",")', 'AS', 'strs5',
                   'APPLY', 'split("")', 'AS', 'empty',
                   'LIMIT', '0', '1'
                   )
    self.assertListEqual([long(1), ['strs', ['hello world', 'foo', 'bar'],
                                    'strs2', ['hello', 'world', 'foo,,,bar'],
                                    'strs3', ['hello world,  foo,,,bar,'],
                                    'strs4', ['hello world', 'foo', 'bar'],
                                    'strs5', ['hello world', 'foo', 'bar'],
                                    'empty', []]], res)
def testPrefixExpansion(self):
    """Highlighting a prefix query highlights whichever expansion the engine chose."""
    # Search with prefix
    self.setupGenesis()
    res = self.cmd('FT.SEARCH', 'idx', 'begi*',
                   'HIGHLIGHT', 'FIELDS', 1, 'txt', 'TAGS', '<b>', '</b>',
                   'SUMMARIZE', 'FIELDS', 1, 'txt', 'LEN', 20)
    res[2] = [safe_unicode(x) for x in res[2]]
    # Prefix expansion uses "early exit" strategy, so the term highlighted
    # won't necessarily be the best term
    self.assertEqual([
        long(1), 'gen1', [
            u'txt',
            'is] one, and they have all one language; and this they <b>begin</b> to do: and now nothing will be restrained from them, which... '
        ]
    ], res)
def read_response(self):
    """Read and parse one RESP reply from the buffered connection.

    Variant that, after decoding bytes with ``self.encoding``, also runs
    the result through ``convertToNumber`` -- so numeric-looking string
    replies come back as numbers (semantics of convertToNumber defined
    elsewhere; confirm before relying on exact return types).
    """
    response = self._buffer.readline()
    if not response:
        raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error: %s, %s" %
                              (str(byte), str(response)))

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        error = self.parse_error(response)
        # if the error is a ConnectionError, raise immediately so the user
        # is notified
        if isinstance(error, ConnectionError):
            raise error
        # otherwise, we're dealing with a ResponseError that might belong
        # inside a pipeline response. the connection's read_response()
        # and/or the pipeline's execute() will raise this error if
        # necessary, so just return the exception instance here.
        return error
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self._buffer.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
        response = convertToNumber(response)
    return response
def read_response(self):
    """Read and parse one RESP reply from the raw socket reader.

    Returns the decoded value; server error replies are *returned* as
    exception instances (not raised) unless they are ConnectionErrors.
    """
    response = self.read()
    if not response:
        raise ConnectionError("Socket closed on remote end")

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error: %s, %s" %
                              (str(byte), str(response)))

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        error = self.parse_error(response)
        # if the error is a ConnectionError, raise immediately so the user
        # is notified
        if isinstance(error, ConnectionError):
            raise error
        # otherwise, we're dealing with a ResponseError that might belong
        # inside a pipeline response. the connection's read_response()
        # and/or the pipeline's execute() will raise this error if
        # necessary, so just return the exception instance here.
        return error
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
    return response
def _testSum(self):
    """GROUPBY with both count and sum reducers, sorted by the summed price."""
    cmd = ['ft.aggregate', 'games', '*',
           'GROUPBY', '1', '@brand',
           'REDUCE', 'count', '0', 'AS', 'count',
           'REDUCE', 'sum', 1, '@price', 'AS', 'sum(price)',
           'SORTBY', 2, '@sum(price)', 'desc',
           'LIMIT', '0', '5'
           ]
    res = self.cmd(*cmd)
    self.assertEqual([long(292), ['brand', '', 'count', '1518', 'sum(price)', '44780.69'],
                      ['brand', 'mad catz', 'count', '43', 'sum(price)', '3973.48'],
                      ['brand', 'razer', 'count', '26', 'sum(price)', '2558.58'],
                      ['brand', 'logitech', 'count', '35', 'sum(price)', '2329.21'],
                      ['brand', 'steelseries', 'count', '37', 'sum(price)', '1851.12']], res)
def read_response(self):
    """Read and parse one RESP reply (legacy error handling).

    Older-style variant: LOADING replies kill the connection, an 'ERR '
    prefix is trimmed, and all other errors come back as a bare
    ResponseError instance (returned, not raised).
    """
    response = self.read()
    if not response:
        raise ConnectionError("Socket closed on remote end")

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error")

    # server returned an error
    if byte == '-':
        if nativestr(response).startswith('LOADING '):
            # if we're loading the dataset into memory, kill the socket
            # so we re-initialize (and re-SELECT) next time.
            raise ConnectionError("Redis is loading data into memory")
        # if the error starts with ERR, trim that off
        if nativestr(response).startswith('ERR '):
            response = response[4:]
        # *return*, not raise the exception class. if it is meant to be
        # raised, it will be at a higher level.
        return ResponseError(response)
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
    return response
def _testTimeFunctions(self):
    """APPLY time functions (timefmt/day/hour/.../year) on a fixed epoch timestamp."""
    cmd = ['FT.AGGREGATE', 'games', '*',
           'APPLY', '1517417144', 'AS', 'dt',
           'APPLY', 'timefmt(@dt)', 'AS', 'timefmt',
           'APPLY', 'day(@dt)', 'AS', 'day',
           'APPLY', 'hour(@dt)', 'AS', 'hour',
           'APPLY', 'minute(@dt)', 'AS', 'minute',
           'APPLY', 'month(@dt)', 'AS', 'month',
           'APPLY', 'dayofweek(@dt)', 'AS', 'dayofweek',
           'APPLY', 'dayofmonth(@dt)', 'AS', 'dayofmonth',
           'APPLY', 'dayofyear(@dt)', 'AS', 'dayofyear',
           'APPLY', 'year(@dt)', 'AS', 'year',
           'LIMIT', '0', '1']
    res = self.cmd(*cmd)
    # day/hour/minute/month are truncations of the timestamp, still in epoch seconds.
    self.assertListEqual([long(1), ['dt', '1517417144',
                                    'timefmt', '2018-01-31T16:45:44Z',
                                    'day', '1517356800',
                                    'hour', '1517414400',
                                    'minute', '1517417100',
                                    'month', '1514764800',
                                    'dayofweek', '3',
                                    'dayofmonth', '31',
                                    'dayofyear', '30',
                                    'year', '2018']], res)
def testSynonymDump(self):
    """ft.syndump maps each term to the list of group ids it belongs to."""
    with self.redis() as r:
        r.flushdb()
        self.assertOk(
            r.execute_command('ft.create', 'idx', 'schema', 'title',
                              'text', 'body', 'text'))
        # Three groups, ids 0..2; 'child' appears in groups 0 and 1.
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'boy', 'child',
                              'offspring'), 0)
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'baby', 'child'), 1)
        self.assertEqual(
            r.execute_command('ft.synadd', 'idx', 'tree', 'wood'), 2)
        self.assertEqual(r.execute_command('ft.syndump', 'idx'), [
            'baby', [long(1)], 'offspring', [long(0)], 'wood', [long(2)],
            'tree', [long(2)], 'child', [long(0), long(1)], 'boy', [long(0)]
        ])
def read_response(self):
    """Read and parse one RESP reply from the raw socket reader.

    LOADING error replies abort the connection; other errors are returned
    via parse_error (not raised).
    """
    response = self.read()
    if not response:
        raise ConnectionError("Socket closed on remote end")

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ("-", "+", ":", "$", "*"):
        raise InvalidResponse("Protocol Error")

    # server returned an error
    if byte == "-":
        response = nativestr(response)
        if response.startswith("LOADING "):
            # if we're loading the dataset into memory, kill the socket
            # so we re-initialize (and re-SELECT) next time.
            raise ConnectionError("Redis is loading data into memory")
        # *return*, not raise the exception class. if it is meant to be
        # raised, it will be at a higher level.
        return self.parse_error(response)
    # single value
    elif byte == "+":
        pass
    # int value
    elif byte == ":":
        response = long(response)
    # bulk response
    elif byte == "$":
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self.read(length)
    # multi-bulk response
    elif byte == "*":
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
    return response
def testSummarizationMeta(self):
    """Interaction of RETURN and SUMMARIZE: RETURN limits which fields come back."""
    self.cmd('ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text',
             'baz', 'text')
    self.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'pill',
             'bar', 'pillow', 'baz', 'piller')

    # Now, return the fields:
    res = self.cmd('ft.search', 'idx', 'pill pillow piller',
                   'RETURN', 1, 'baz',
                   'SUMMARIZE', 'FIELDS', 2, 'foo', 'bar')
    self.assertEqual(1, res[0])
    result = res[2]
    names = [x[0] for x in grouper(result, 2)]

    # RETURN restricts the number of fields
    self.assertEqual(set(('baz', )), set(names))

    res = self.cmd('ft.search', 'idx', 'pill pillow piller',
                   'RETURN', 3, 'foo', 'bar', 'baz', 'SUMMARIZE')
    self.assertEqual([
        long(1), 'doc1',
        ['foo', 'pill... ', 'bar', 'pillow... ', 'baz', 'piller... ']
    ], res)
def read_response(self):
    """Read and parse one RESP reply from the raw socket reader.

    Minimal variant: every error reply is passed to parse_error and the
    resulting exception instance is returned, never raised here.
    """
    response = self.read()
    if not response:
        raise ConnectionError("Socket closed on remote end")

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error")

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        # *return*, not raise the exception class. if it is meant to be
        # raised, it will be at a higher level.
        return self.parse_error(response)
    # single value
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # RESP null bulk string.
            return None
        response = self.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for i in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
    return response
def _parse_scan(self, response, **options):
    """
    Borrowed from redis-py::client.py

    Split a SCAN-family reply into its cursor and payload, coercing
    the cursor to an integer.
    """
    cursor, items = response
    return long(cursor), items
def read_response(self):
    '''
    Reads one line from the wire, and interprets it.
    Example: the acknowledgment to an unsubscribe
    from topic myTopic on the wire looks like this:

         *3\r\n$11\r\nUNSUBSCRIBE\r\n$7\r\nmyTopic\r\n:1\r\n'

         *3    # three items to follow
         $11   # string of 11 chars
         UNSUBSCRIBE
         $7    # string of 7 chars
         myTopic
         :1    # one topic subscribed to now

    Each line will cause a recursive call to this method
    (see elif byte == '*' below).

    Simpler calls will be individual elements, such
    as ':12', which returns the integer 12.

    These are the possible prefixes; each item
    is followed by a \r\n, which is stripped
    by SocketLineReader:

        +<str> simple string
        :<int> integer
        $<n>   string of length <n>
        *<num> start of array with <num> elements

    When the message to parse is the acknowledgment of
    a SUBSCRIBE or UNSUBSCRIBE command, this method
    will set() event self.unsubscribeAckEvent/self.unsubscribeAckEvent.

    :return: response string
    :rtype: string
    '''
    response = self._buffer.readline()
    if not response:
        raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)

    # First byte is the RESP type marker; the rest is the payload.
    byte, response = byte_to_chr(response[0]), response[1:]

    if byte not in ('-', '+', ':', '$', '*'):
        raise InvalidResponse("Protocol Error: %s, %s" %
                              (str(byte), str(response)))

    # server returned an error
    if byte == '-':
        response = nativestr(response)
        error = self.parse_error(response)
        # if the error is a ConnectionError, raise immediately so the user
        # is notified
        if isinstance(error, ConnectionError):
            raise error
        # otherwise, we're dealing with a ResponseError that might belong
        # inside a pipeline response. the connection's read_response()
        # and/or the pipeline's execute() will raise this error if
        # necessary, so just return the exception instance here.
        return error
    # simple-string: response holds result:
    elif byte == '+':
        pass
    # int value
    elif byte == ':':
        response = long(response)
    # bulk response
    elif byte == '$':
        length = int(response)
        if length == -1:
            # Null string:
            return None
        response = self._buffer.read(length)
    # multi-bulk response
    elif byte == '*':
        length = int(response)
        if length == -1:
            # RESP null array.
            return None
        response = [self.read_response() for _ in xrange(length)]

    if isinstance(response, bytes) and self.encoding:
        response = response.decode(self.encoding)
    #***********
    #print('Response: %s' % byte + '|' + str(response))
    #***********
    return response
def _testLoad(self):
    """LOAD of document fields, including a nonexistent one (returned as None)."""
    res = self.cmd('ft.aggregate', 'games', '*',
                   'LOAD', '3', '@brand', '@price', '@nonexist',
                   'LIMIT', 0, 5
                   )
    self.assertListEqual([long(1), ['brand', 'Dark Age Miniatures', 'price', '31.23', 'nonexist', None],
                          ['brand', 'Palladium Books', 'price', '9.55', 'nonexist', None],
                          ['brand', '', 'price', '0', 'nonexist', None],
                          ['brand', 'Evil Hat Productions', 'price', '15.48', 'nonexist', None],
                          ['brand', 'Fantasy Flight Games', 'price', '33.96', 'nonexist', None]], res)