Example #1
0
    def read(self, exchange, dtype, pair):
        """Drain new entries for one (dtype, exchange, pair) stream from Redis.

        Resumes after the last stream id seen for this key (or from the
        beginning, '0-0', on the first read).  Book updates are decoded
        from JSON and flattened per level; numeric fields are converted
        to float.  Returns a list of update dicts (possibly empty).
        """
        key = f'{dtype}-{exchange}-{pair}'

        data = self.conn.xread({key: self.last_id.get(key, '0-0')})

        # Guard both an empty reply and a stream entry with no messages:
        # without the second check, self.ids[key][-1] below could raise
        # IndexError when the first read for a key returns zero messages.
        if not data or not data[0][1]:
            return []

        LOG.info("%s: Read %d messages from Redis", key, len(data[0][1]))
        ret = []

        for update_id, update in data[0][1]:
            if dtype in {L2_BOOK, L3_BOOK}:
                # Book updates are stored as a JSON blob under the 'data' field
                update = json.loads(update['data'])
                update = book_flatten(update, update['timestamp'],
                                      update['delta'])
                for u in update:
                    for k in ('size', 'amount', 'price', 'timestamp'):
                        if k in u:
                            u[k] = float(u[k])
                ret.extend(update)
            if dtype == TRADES:
                for k in ('size', 'amount', 'price', 'timestamp'):
                    if k in update:
                        update[k] = float(update[k])
                ret.append(update)
            self.ids[key].append(update_id)

        # Remember where to resume on the next call
        self.last_id[key] = self.ids[key][-1]
        return ret
Example #2
0
    def read(self, exchange, dtype, pair):
        """Consume pending messages for one Kafka topic and decode them.

        Book updates are flattened into per-level entries; trades and
        tickers are appended as-is.  Returns a list of update dicts.
        """
        topic = f'{dtype}-{exchange}-{pair}'
        messages = self._conn(topic).consume(1000000, timeout=0.5)
        LOG.info("%s: Read %d messages from Kafka", topic, len(messages))

        results = []
        for msg in messages:
            # Track the most recently seen raw message for this topic
            self.ids[topic] = msg
            payload = json.loads(msg.value().decode('utf8'))

            if dtype in {L2_BOOK, L3_BOOK}:
                results.extend(book_flatten(payload, payload['timestamp'],
                                            payload['delta']))
            if dtype in {TRADES, TICKER}:
                results.append(payload)
        return results
Example #3
0
    def read(self, exchange, dtype, pair, start=None, end=None):
        """Read messages for one (dtype, exchange, pair) stream from Redis.

        With both `start` and `end`, fetch that id range via XRANGE; with a
        configured batch_size, fetch at most that many entries; otherwise
        XREAD everything after the last id seen (or from the beginning,
        '0-0').  Numeric fields are converted to float before returning.
        """
        key = f'{dtype}-{exchange}-{pair}'

        if start and end:
            data = [[key, self.conn.xrange(key, min=start, max=end)]]
        elif self.batch_size:
            data = [[key, self.conn.xrange(key, count=self.batch_size)]]
        else:
            data = self.conn.xread({key: self.last_id.get(key, '0-0')})

        # Nothing new: empty reply, or a stream entry with no messages
        if not data or not data[0][1]:
            return []

        LOG.info("%s: Read %d messages from Redis", key, len(data[0][1]))

        ret = []
        for msg_id, payload in data[0][1]:
            if dtype in {L2_BOOK, L3_BOOK}:
                # Book updates arrive as a JSON blob under 'data'
                book = json.loads(payload['data'])
                entries = book_flatten(book, book['timestamp'],
                                       book['receipt_timestamp'],
                                       book['delta'])
                for entry in entries:
                    for field in ('size', 'amount', 'price', 'timestamp',
                                  'receipt_timestamp'):
                        if field in entry:
                            entry[field] = float(entry[field])
                ret.extend(entries)
            elif dtype in {TRADES, TICKER, OPEN_INTEREST}:
                for field in ('size', 'amount', 'price', 'timestamp',
                              'receipt_timestamp', 'bid', 'ask',
                              'open_interest'):
                    if field in payload:
                        payload[field] = float(payload[field])
                ret.append(payload)
            elif dtype == FUNDING:
                for field in payload:
                    try:
                        payload[field] = float(payload[field])
                    except ValueError:
                        # non-numeric values stay as strings
                        pass
                ret.append(payload)
            self.ids[key].append(msg_id)

        # Remember where to resume on the next call
        self.last_id[key] = self.ids[key][-1]
        return ret
Example #4
0
    def read(self, exchange, dtype, pair, start=None, end=None):
        """Read messages for one topic from Kafka.

        With both `start` and `end` (timestamps), resolve the matching
        offset window and consume exactly that range; otherwise drain
        whatever is currently pending.  Returns a list of decoded
        updates (book updates flattened per level).
        """
        kafka = StorageEngines.confluent_kafka
        key = f'{dtype}-{exchange}-{pair}'

        if start and end:
            start_offset = self._conn(key).offsets_for_times(
                [kafka.TopicPartition(key, 0, start)])[0]
            stop_offset = self._conn(key).offsets_for_times(
                [kafka.TopicPartition(key, 0, end)])[0]
            # offset == -1 means no message at or after `start`
            if start_offset.offset == -1:
                return []

            self._conn(key).assign([start_offset])
            offset_diff = stop_offset.offset - start_offset.offset
            if offset_diff <= 0:
                return []

            data = self._conn(key).consume(offset_diff)
            self._conn(key).unassign()
        else:
            data = self._conn(key).consume(1000000, timeout=0.5)

        LOG.info("%s: Read %d messages from Kafka", key, len(data))
        ret = []

        for message in data:
            self.ids[key] = message
            msg = message.value().decode('utf8')
            try:
                update = json.loads(msg)
            except Exception:
                if 'Subscribed topic not available' in msg:
                    # Broker error payload rather than data -- stop here
                    return ret
                # BUG FIX: previously any other non-JSON payload fell
                # through and reused the previous iteration's `update`
                # (or raised UnboundLocalError on the first message);
                # surface the bad payload instead of silently corrupting
                # the result.
                raise
            if dtype in {L2_BOOK, L3_BOOK}:
                # NOTE(review): argument order (delta before
                # receipt_timestamp) differs from the Redis reader --
                # confirm against book_flatten's signature.
                update = book_flatten(update, update['timestamp'],
                                      update['delta'],
                                      update['receipt_timestamp'])
                ret.extend(update)
            if dtype in {TRADES, TICKER, FUNDING, OPEN_INTEREST}:
                ret.append(update)

        return ret