Example #1
    def test_put_tuple_record_success(self):
        project_name = 'put'
        topic_name = 'success'
        record_schema = RecordSchema.from_lists(
            ['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
            [FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])

        records = []
        record0 = TupleRecord(schema=record_schema, values=[1, 'yc1', 10.01, True, 253402271999000000])
        record0.shard_id = '0'
        records.append(record0)

        record1 = TupleRecord(schema=record_schema)
        record1.values = [-9223372036854775808, 'yc1', 10.01, True, -62135798400000000]
        record1.hash_key = '4FFFFFFFFFFFFFFD7FFFFFFFFFFFFFFD'
        records.append(record1)

        record2 = TupleRecord(schema=record_schema, values=[9223372036854775807, 'yc1', 10.01, True, 1455869335000000])
        record2.set_value(0, 9223372036854775807)
        record2.set_value('string_field', 'yc1')
        record2.partition_key = 'TestPartitionKey'
        records.append(record2)

        with HTTMock(datahub_api_mock):
            put_result = dh.put_records(project_name, topic_name, records)

        assert put_result.failed_record_count == 0
        assert put_result.failed_records == []
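
The schema above is the five-field tuple topic used throughout these examples. As a minimal sketch of how such a topic could be created with pydatahub (assuming the client's create_tuple_topic signature; verify it against your SDK version):

from datahub import DataHub
from datahub.models import RecordSchema, FieldType

dh = DataHub('<access_id>', '<access_key>', '<endpoint>')
record_schema = RecordSchema.from_lists(
    ['bigint_field', 'string_field', 'double_field', 'bool_field', 'time_field'],
    [FieldType.BIGINT, FieldType.STRING, FieldType.DOUBLE, FieldType.BOOLEAN, FieldType.TIMESTAMP])
# Assumption: create_tuple_topic(project, topic, shard_count, life_cycle,
# record_schema, comment) as in pydatahub samples; shard count 3 and
# life cycle 7 days are arbitrary placeholder values.
dh.create_tuple_topic('put', 'success', 3, 7, record_schema, 'five-field test topic')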
Example #2
 def pub_topic(self):
     #logger.debug(self.to_string() + "pub_topic() self.symbols_analyze={0}".format(self.symbols_analyze))
     if self.userid != 0:
         return
     topic_name = "t_symbols_analyze"
     topic, shards = g_datahub.get_topic(topic_name)
     record = TupleRecord(schema=topic.record_schema)
     record.values = [
         self.symbols_analyze.f_ex_id, self.symbols_analyze.f_symbol,
         self.symbols_analyze.f_timeframe, self.symbols_analyze.f_bid,
         self.symbols_analyze.f_ask, self.symbols_analyze.f_spread,
         self.symbols_analyze.f_bar_trend,
         self.symbols_analyze.f_volume_mean, self.symbols_analyze.f_volume,
         self.symbols_analyze.f_ma_period, self.symbols_analyze.f_ma_up,
         self.symbols_analyze.f_ma_low, self.symbols_analyze.f_ma_trend,
         self.symbols_analyze.f_channel_period,
         self.symbols_analyze.f_channel_up,
         self.symbols_analyze.f_channel_low,
         self.symbols_analyze.f_breakout_trend,
         self.symbols_analyze.f_breakout_ts,
         self.symbols_analyze.f_breakout_price,
         self.symbols_analyze.f_breakout_volume,
         self.symbols_analyze.f_breakout_volume_rate,
         self.symbols_analyze.f_breakout_price_highest,
         self.symbols_analyze.f_breakout_price_highest_ts,
         self.symbols_analyze.f_breakout_rate,
         self.symbols_analyze.f_breakout_rate_max,
         arrow.utcnow().timestamp * 1000
     ]
     record.shard_id = shards[randint(1, 1000) % len(shards)].shard_id
     g_datahub.pub_topic(topic_name, [record])
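
In the example above, randint(1, 1000) % len(shards) spreads records across shards but takes a detour through modulo arithmetic. A simpler sketch, using record and shards as defined in pub_topic() above:

import random

# Pick a shard uniformly at random; same intent, no modulo.
record.shard_id = random.choice(shards).shard_id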
Example #3
    def fetch_ticker_2_datahub(self, msg):
        #logger.debug(self.to_string() + 'callback_ticker() msg={0}'.format(msg))
        topic, shards = self.topic['t_ticker']
        records = []
        for t in msg:
            ticker = self.ticker_2_ccxt(t)
            symbol = ticker['symbol']
            f_ts = ticker['timestamp'] or int(arrow.utcnow().timestamp * 1000)
            f_bid = ticker['bid'] or 0
            f_bid_volume = ticker['bidVolume'] or 0
            f_ask = ticker['ask'] or 0
            f_ask_volume = ticker['askVolume'] or 0
            f_vwap = ticker['vwap'] or 0
            f_open = ticker['open'] or 0
            f_high = ticker['high'] or 0
            f_low = ticker['low'] or 0
            f_close = ticker['close'] or 0
            f_last = ticker['last'] or 0
            f_previous_close = ticker['previousClose'] or 0
            f_change = ticker['change'] or 0
            f_percentage = ticker['percentage'] or 0
            f_average = ticker['average'] or 0
            f_base_volume = ticker['baseVolume'] or 0
            f_quote_volume = ticker['quoteVolume'] or 0
            f_ts_update = arrow.utcnow().timestamp
            v = [
                ws_binance.ex_id, symbol, f_ts, f_bid, f_bid_volume, f_ask,
                f_ask_volume, f_vwap, f_open, f_high, f_low, f_close, f_last,
                f_previous_close, f_change, f_percentage, f_average,
                f_base_volume, f_quote_volume, f_ts_update
            ]
            record = TupleRecord(schema=topic.record_schema)
            record.values = v
            i = random.randint(1, 100) % len(shards)
            record.shard_id = shards[i].shard_id
            #logger.debug(self.to_string() + 'callback_ticker()  record={}'.format(record))
            records.append(record)

        logger.debug(self.to_string() +
                     'callback_ticker() len(records)={0}'.format(len(records)))
        try:
            g_datahub.pub_topic('t_ticker', records)
        except Exception as e:
            logger.info(self.to_string() + 'callback_ticker() e={0}'.format(e))
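
The long ticker['...'] and ticker['...'] or 0 runs (simplified to ticker['...'] or 0 above) recur in Examples #4 and #8. A small helper can collapse them; a sketch, where the name field_or is hypothetical and not part of ccxt or the original code:

def field_or(ticker, key, default=0):
    # Hypothetical helper: None, 0 or missing keys fall back to the default.
    return ticker.get(key) or default

ticker = {'bid': None, 'ask': 4200.5}  # e.g. a partially filled ccxt ticker
f_bid = field_or(ticker, 'bid')        # -> 0
f_ask = field_or(ticker, 'ask')        # -> 4200.5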
Example #4
 async def fetch_ticker(self, ex_id, topic, shards, symbol):
     self.init_exchange(ex_id)
     #logger.debug(self.to_string() + "fetch_ticker({0})".format(ex_id))
     records = []
     ticker = await self.exchanges[ex_id].ex.fetch_ticker(symbol)
     f_ts = ticker['timestamp'] or int(arrow.utcnow().timestamp * 1000)
     f_bid = ticker['bid'] or 0
     f_bid_volume = ticker['bidVolume'] or 0
     f_ask = ticker['ask'] or 0
     f_ask_volume = ticker['askVolume'] or 0
     f_vwap = ticker['vwap'] or 0
     f_open = ticker['open'] or 0
     f_high = ticker['high'] or 0
     f_low = ticker['low'] or 0
     f_close = ticker['close'] or 0
     f_last = ticker['last'] or 0
     f_previous_close = ticker['previousClose'] or 0
     f_change = ticker['change'] or 0
     f_percentage = ticker['percentage'] or 0
     f_average = ticker['average'] or 0
     f_base_volume = ticker['baseVolume'] or 0
     f_quote_volume = ticker['quoteVolume'] or 0
     f_ts_update = arrow.utcnow().timestamp
     v = [
         ex_id, symbol, f_ts, f_bid, f_bid_volume, f_ask, f_ask_volume,
         f_vwap, f_open, f_high, f_low, f_close, f_last, f_previous_close,
         f_change, f_percentage, f_average, f_base_volume, f_quote_volume,
         f_ts_update
     ]
     record = TupleRecord(schema=topic.record_schema)
     record.values = v
     i = random.randint(1, 100) % len(shards)
     record.shard_id = shards[i].shard_id
     fetch_base.__symbol_ex_ticker[symbol][ex_id] = {
         "f_ts": f_ts,
         "f_bid": f_bid,
         "f_ask": f_ask,
     }
     #await fetch_base.__queue_task_spread.put(v)
     records.append(record)
     return records
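
These examples read arrow.utcnow().timestamp as an integer property, which matches arrow releases before 1.0; in arrow >= 1.0 timestamp became a method, so the expression .timestamp * 1000 raises a TypeError there. A version-neutral sketch for the millisecond timestamps used here:

import arrow

# float_timestamp is a property on both old and new arrow releases, so this
# sidesteps the .timestamp property-vs-method change in arrow 1.0.
ts_ms = int(arrow.utcnow().float_timestamp * 1000)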
Example #5
    shards = shards_result.shards
    for shard in shards:
        print(shard)
    print("=======================================\n\n")

    while True:
        records = []

        record0 = TupleRecord(schema=topic.record_schema,
                              values=[1, 'yc1', 10.01, True, 1455869335000000])
        record0.shard_id = shards[0].shard_id
        record0.put_attribute('AK', '47')
        records.append(record0)

        record1 = TupleRecord(schema=topic.record_schema)
        record1.values = [1, 'yc1', 10.01, True, 1455869335000000]
        record1.shard_id = shards[1].shard_id
        records.append(record1)

        record2 = TupleRecord(schema=topic.record_schema)
        record2.set_value(0, 3)
        record2.set_value(1, 'yc3')
        record2.set_value('double_field', 10.03)
        record2.set_value('bool_field', False)
        record2.set_value('time_field', 1455869335000013)
        record2.shard_id = shards[2].shard_id
        records.append(record2)

        failed_indices = dh.put_records(project_name, topic_name, records)
        print("put tuple %d records, failed list: %s" %
              (len(records), failed_indices))
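
Note that recent pydatahub releases return a PutRecordsResult from put_records (as in Example #1) rather than a plain list of failed indices, so the result is better inspected through its fields:

put_result = dh.put_records(project_name, topic_name, records)
# failed_record_count / failed_records are the PutRecordsResult fields
# asserted on in Example #1.
print("put tuple %d records, failed count: %d" %
      (len(records), put_result.failed_record_count))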
Example #6
    async def run_fetch_ohlcv(self, ex_id, topic_name, symbols, timeframe_str,
                              since_ms, split_i, max_split_count):
        self.init_exchange(ex_id)
        if not self.exchanges[ex_id].has_api('fetchOHLCV'):
            logger.warning(self.to_string() +
                           "run_fetch_ohlcv({0}) has no fetchOHLCV interface".format(ex_id))
            return
        #logger.debug(self.to_string() + "run_fetch_ohlcv({0})".format(ex_id))
        '''
        if not self.exchanges[ex_id].ex.timeframes or timeframe_str not in self.exchanges[ex_id].ex.timeframes:
            logger.info(self.to_string() + "run_fetch_ohlcv({0}) NOT has timeframe={1}".format(ex_id, timeframe_str))
            return
        '''
        if not symbols:
            symbols = list(fetch_base.__ex_symbol_fee[ex_id].keys())
        #logger.debug(self.to_string() + "run_fetch_ohlcv({0},{1},{2}) len(symbols)={3}".format(ex_id, topic_name, timeframe_str, len(symbols)))

        symbols_todo = []
        for s, symbol in enumerate(symbols):
            if s % max_split_count == split_i:
                symbols_todo.append(symbol)
        #logger.debug(self.to_string() + "run_fetch_ohlcv({0},{1},{2}) len(symbols_todo)={3}".format(ex_id, topic_name, timeframe_str, len(symbols_todo)))
        if not symbols_todo:
            return

        topic, shards = g_datahub.get_topic(topic_name)
        f_timeframe = util.TimeFrame_Minutes[timeframe_str]
        while True:
            ts_start = arrow.utcnow().shift(
                minutes=-f_timeframe).timestamp * 1000
            i = 0
            for symbol in symbols_todo:
                try:
                    data = await self.exchanges[ex_id].fetch_ohlcv(
                        symbol, timeframe_str, since_ms)
                except (ccxt.RequestTimeout, ccxt.DDoSProtection) as e:
                    logger.debug(
                        self.to_string() +
                        "run_fetch_ohlcv({0},{1},{2}) len(symbols_todo)={3} symbol={4} {5}"
                        .format(ex_id, topic_name, timeframe_str,
                                len(symbols_todo), symbol, type(e).__name__))
                    await asyncio.sleep(10)
                    continue
                except Exception as e:
                    #logger.error(traceback.format_exc())
                    logger.debug(
                        self.to_string() +
                        "run_fetch_ohlcv({0},{1},{2}) len(symbols_todo)={3} Exception={4}"
                        .format(ex_id, topic_name, timeframe_str,
                                len(symbols_todo), e))
                    await asyncio.sleep(10)
                    continue
                f_ts_update = arrow.utcnow().timestamp
                #logger.debug(self.to_string() + "run_fetch_ohlcv() f_ts_update={0}".format(f_ts_update))
                records = []
                for d in data:
                    f_ts = d[0]
                    f_o = d[1]
                    f_h = d[2]
                    f_l = d[3]
                    f_c = d[4]
                    f_v = d[5]
                    record = TupleRecord(schema=topic.record_schema)
                    record.values = [
                        ex_id, symbol, f_timeframe, f_ts, f_o, f_h, f_l, f_c,
                        f_v, f_ts_update
                    ]
                    record.shard_id = shards[i % len(shards)].shard_id
                    records.append(record)
                    i = i + 1
                #logger.debug(self.to_string() + "run_fetch_ohlcv({0},{1},{2},{3})len(records) = {4}".format(ex_id, topic_name, symbol, timeframe_str, len(records)))
                g_datahub.pub_topic(topic_name, records)
                await asyncio.sleep(3)
            since_ms = ts_start
            await asyncio.sleep(3)
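
The fixed asyncio.sleep(10) after every failure can keep hammering a rate-limited exchange. A minimal exponential-backoff sketch (the helper is hypothetical, not part of the original code):

import asyncio

async def backoff_sleep(attempt, base=10, cap=300):
    # Hypothetical helper: sleeps 10s, 20s, 40s, ... capped at 5 minutes.
    await asyncio.sleep(min(base * (2 ** attempt), cap))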
Example #7
    async def run_calc_spread(self, topic_name="t_spread"):
        #logger.debug(self.to_string() + "run_calc_spread()")
        topic, shards = g_datahub.get_topic(topic_name)
        shard_count = len(shards)
        while True:
            try:
                # Too much data; processing can't keep up
                qsize = fetch_base.__queue_task_spread.qsize()
                if qsize >= 100:
                    logger.warn(self.to_string() +
                                "run_calc_spread() qsize={0}".format(qsize))

                # for test (leftover scaffolding: draining the queue here made
                # everything below unreachable, so it is disabled)
                #await fetch_base.__queue_task_spread.get()
                #continue

                # [f_ex_id, f_symbol, f_ts, f_bid, f_bid_volume, f_ask, f_ask_volume, f_vwap, f_open, f_high, f_low, f_close, f_last, f_previous_close, f_change, f_percentage, f_average, f_base_volume, f_quote_volume]
                task_record = await fetch_base.__queue_task_spread.get()
                symbol = task_record[1]
                ex1 = task_record[0]
                ex1_name = self.exchanges[ex1].ex.name
                ex1_bid = task_record[3]
                ex1_ask = task_record[5]
                ex1_ts = task_record[2]
                ex1_fee = fetch_base.__ex_symbol_fee[ex1][
                    symbol] if fetch_base.__ex_symbol_fee[ex1][symbol] else 0
                record2s = fetch_base.__symbol_ex_ticker[
                    symbol] if fetch_base.__symbol_ex_ticker[symbol] else {}
                records = []
                for ex2, v in record2s.items():
                    if ex2 == ex1:
                        continue
                    ex2_name = self.exchanges[ex2].ex.name
                    ex2_bid = v["f_bid"]
                    ex2_ask = v["f_ask"]
                    ex2_ts = v["f_ts"]
                    ex2_fee = fetch_base.__ex_symbol_fee[ex2][
                        symbol] if fetch_base.__ex_symbol_fee[ex2][
                            symbol] else 0
                    if abs(ex1_ts - ex2_ts) > 30000:
                        logger.info(
                            self.to_string() +
                            "run_calc_spread() abs(ex1_ts - ex2_ts)={0}".
                            format(abs(ex1_ts - ex2_ts)))
                        continue
                    spread_ts = ex1_ts if ex1_ts > ex2_ts else ex2_ts
                    f_fee = (ex1_bid * ex1_fee + ex2_ask * ex2_fee)

                    f_spread = ex1_bid - ex2_ask
                    f_profit = (f_spread - f_fee)
                    f_profit_p = (f_profit / ex1_bid) if ex1_bid > 0.0 else 0.0
                    f_ts_update = arrow.utcnow().timestamp
                    record1 = TupleRecord(schema=topic.record_schema)
                    record1.values = [
                        symbol, ex1, ex1_name, ex1_bid, ex1_ts, ex1_fee, ex2,
                        ex2_name, ex2_ask, ex2_ts, ex2_fee, spread_ts,
                        f_spread, f_fee, f_profit, f_profit_p, f_ts_update
                    ]
                    i = random.randint(1, 100) % shard_count
                    record1.shard_id = shards[i].shard_id
                    records.append(record1)

                    f_spread = ex2_bid - ex1_ask
                    f_profit = (f_spread - f_fee)
                    f_profit_p = (f_profit / ex2_bid) if ex2_bid > 0.0 else 0.0
                    record2 = TupleRecord(schema=topic.record_schema)
                    record2.values = [
                        symbol, ex2, ex2_name, ex2_bid, ex2_ts, ex2_fee, ex1,
                        ex1_name, ex1_ask, ex1_ts, ex1_fee, spread_ts,
                        f_spread, f_fee, f_profit, f_profit_p, f_ts_update
                    ]
                    i = random.randint(1, 100) % shard_count
                    record2.shard_id = shards[i].shard_id
                    records.append(record2)
                g_datahub.pub_topic(topic_name, records)
            except DatahubException:
                logger.error(traceback.format_exc())
            except Exception:
                logger.error(traceback.format_exc())
            except:
                logger.error(traceback.format_exc())
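
With the traceback.format_exc() calls corrected above (the function takes no exception argument), an equivalent and shorter idiom is logger.exception, which appends the active traceback automatically when called inside an except block:

import logging

logger = logging.getLogger(__name__)

try:
    raise ValueError("demo")
except Exception:
    # logger.exception logs at ERROR level and attaches the current traceback.
    logger.exception("run_calc_spread() failed")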
Example #8
    async def fetch_tickers(self, ex_id, topic, shards):
        # Temporary: sleep to reduce CPU load
        await asyncio.sleep(30)

        self.init_exchange(ex_id)
        #logger.debug(self.to_string() + "fetch_tickers({0})".format(ex_id))
        records = []
        if not self.exchanges[ex_id].has_api('fetchTickers'):
            for symbol in fetch_base.__ex_symbol_fee[ex_id].keys():
                # Temporary: sleep to reduce CPU load
                await asyncio.sleep(3)

                try:
                    rs = await self.fetch_ticker(ex_id, topic, shards, symbol)
                    records.extend(rs)
                except ccxt.RequestTimeout:
                    #logger.error(traceback.format_exc())
                    logger.info(
                        self.to_string() +
                        "fetch_tickers() fetch_ticker({0},{1}) RequestTimeout "
                        .format(ex_id, symbol))
                    await asyncio.sleep(10)
                except ccxt.DDoSProtection:
                    #logger.error(traceback.format_exc())
                    logger.info(
                        self.to_string() +
                        "fetch_tickers() fetch_ticker({0},{1}) DDoSProtection "
                        .format(ex_id, symbol))
                    await asyncio.sleep(10)
                except Exception as e:
                    #logger.error(traceback.format_exc())
                    logger.error(
                        self.to_string() +
                        "fetch_tickers() fetch_ticker({0},{1}) Exception={2}".
                        format(ex_id, symbol, e))
                    await asyncio.sleep(10)
                except:
                    logger.error(traceback.format_exc())
                    logger.error(self.to_string() +
                                 "fetch_tickers() fetch_ticker({0},{1})".
                                 format(ex_id, symbol))
                    await asyncio.sleep(10)
            logger.debug(self.to_string() +
                         "fetch_tickers({0}) len(records)={1}".format(
                             ex_id, len(records)))
            return records
        tickers = await self.exchanges[ex_id].ex.fetch_tickers()
        i = 0
        for symbol, ticker in tickers.items():
            f_ts = ticker['timestamp'] or int(arrow.utcnow().timestamp * 1000)
            f_bid = ticker['bid'] or 0
            f_bid_volume = ticker['bidVolume'] or 0
            f_ask = ticker['ask'] or 0
            f_ask_volume = ticker['askVolume'] or 0
            f_vwap = ticker['vwap'] or 0
            f_open = ticker['open'] or 0
            f_high = ticker['high'] or 0
            f_low = ticker['low'] or 0
            f_close = ticker['close'] or 0
            f_last = ticker['last'] or 0
            f_previous_close = ticker['previousClose'] or 0
            f_change = ticker['change'] or 0
            f_percentage = ticker['percentage'] or 0
            f_average = ticker['average'] or 0
            f_base_volume = ticker['baseVolume'] or 0
            f_quote_volume = ticker['quoteVolume'] or 0
            f_ts_update = arrow.utcnow().timestamp
            v = [
                ex_id, symbol, f_ts, f_bid, f_bid_volume, f_ask, f_ask_volume,
                f_vwap, f_open, f_high, f_low, f_close, f_last,
                f_previous_close, f_change, f_percentage, f_average,
                f_base_volume, f_quote_volume, f_ts_update
            ]
            record = TupleRecord(schema=topic.record_schema)
            record.values = v
            record.shard_id = shards[i % len(shards)].shard_id
            records.append(record)
            i = i + 1
            fetch_base.__symbol_ex_ticker[symbol][ex_id] = {
                "f_ts": f_ts,
                "f_bid": f_bid,
                "f_ask": f_ask,
            }
            #await fetch_base.__queue_task_spread.put(v)
        logger.debug(
            self.to_string() +
            "fetch_tickers({0}) len(records)={1}".format(ex_id, len(records)))
        return records
Example #9
def handler(event, context):
    logger = logging.getLogger()

    evt = json.loads(event)
    #print("[print1]IoT trigger and send data to FunctionCompute test output, The content of event is : %s" % (evt))

    timestamp = evt['timestamp']
    values = evt['values']
    count_of_value = len(values)

    ACCESS_ID = 'XXXXX'
    ACCESS_KEY = 'XXXXX'
    ENDPOINT = 'http://dh-cn-XXXXX.aliyun-inc.com'
    dh = DataHub(ACCESS_ID, ACCESS_KEY, ENDPOINT)

    PROJECT_NAME = 'veolia_d4b_poc'
    TOPIC_NAME = 'extract_result_table'

    # ===================== put tuple records =====================
    # Block until all shards are ready
    dh.wait_shards_ready(PROJECT_NAME, TOPIC_NAME)

    topic = dh.get_topic(PROJECT_NAME, TOPIC_NAME)
    record_schema = topic.record_schema

    shards_result = dh.list_shard(PROJECT_NAME, TOPIC_NAME)
    shards = shards_result.shards
    shard_count = len(shards)
    #   for shard in shards:
    #     print("[print8]IoT trigger and send data to FunctionCompute test output, The Shard is : (%s)" % (shard))

    records = []

    for value in values:
        # id sample: SE433_OPC.S01.AISA0101
        id = value['id']
        id_list = id.split('.')
        id_company_code = (id_list[0].split('_'))[0]
        id_protocol_name = (id_list[0].split('_'))[1]
        id_system_code = id_list[1]
        id_tagname = id_list[2]

        v = value['v']
        q = 'true' if value['q'] else 'false'
        t = value['t']
        #print("[print7]IoT trigger and send data to FunctionCompute test output, The value is : (%s, %s, %s, %s)" % (id,v,q,t))

        rec = TupleRecord(schema=topic.record_schema)
        rec.values = [
            timestamp, id_company_code, id_protocol_name, id_system_code,
            id_tagname, v, q, t
        ]
        rec.shard_id = shards[random.randint(0, shard_count - 1)].shard_id
        records.append(rec)

    failed_indices = dh.put_records(PROJECT_NAME, TOPIC_NAME, records)
    print("[print9] put tuple %d records, shard_id = %s, failed list: %s" %
          (len(records), rec.shard_id, failed_indices))
    # If failed_indices is non-empty, the failed records should be retried.

    return 'success'


# Sample event:
# {
# 	"timestamp":1521698375065,
# 	"values":[
# 		{
# 			"id":"SE433_OPC.S01.IW1440",
# 			"v":206,
# 			"q":true,
# 			"t":1521698358299
# 		},
# 		{
# 			"id":"SESE433_OPC433.S01.LCV1414_ACT",
# 			"v":42,
# 			"q":true,
# 			"t":1521698358222
# 		},
# 		{
# 			"id":"SE433_OPC.S01.LT1430A",
# 			"v":22,
# 			"q":true,
# 			"t":1521698358235
# 		},
# 		…
# 	]
# }
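
As the retry comment above suggests, a failed put should be retried. A minimal sketch against the PutRecordsResult fields shown in Example #1 (the index attribute on each failed-record entry is an assumption; verify it against your pydatahub version):

batch = records
for _ in range(3):
    result = dh.put_records(PROJECT_NAME, TOPIC_NAME, batch)
    if result.failed_record_count == 0:
        break
    # Assumption: each failed_records entry carries the index of the failed
    # record within the batch just submitted.
    batch = [batch[f.index] for f in result.failed_records]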
Example #10
def handler(event, context):
    logger.debug(event)
    body = event.decode()
    if body == "":
        logger.debug("empty event body")
        return
    evt = json.loads(body)
    if not evt:
        logger.debug("evt is None")
        return
    records = evt.get("records")
    if not records:
        logger.debug("records is None")
        return
    logger.debug("len(records)={0}".format(len(records)))
    logger.debug(records)
    for record in records:
        data = record["data"]
        ex1 = data[0]
        symbol = data[1]
        ex1_ts = data[2]
        ex1_bid = data[3]
        ex1_ask = data[5]
        sql_select = "SELECT f_ex_id,f_bid,f_ask,f_ts FROM t_ticker_current where f_symbol=\"{0}\" and f_ex_id!=\"{1}\" and f_ts > (UNIX_TIMESTAMP() - 30) * 1000;".format(
            symbol, ex1)
        logger.debug("sql_select={0}".format(sql_select))
        cursor_read = conn_read.cursor()
        cursor_read.execute(sql_select)
        rows = cursor_read.fetchall()
        logger.debug("rows={0}".format(len(rows)))
        if len(rows) <= 0:
            logger.debug("len(rows) <= 0")
            continue
        out_records = []
        sql_insert = "replace into t_spread_current(f_symbol,f_ex1,f_ex2,f_spread,f_ts) values"
        c = 0
        cursor_write = conn_write.cursor()
        for row in rows:
            ex2 = row[0]
            ex2_bid = row[1]
            ex2_ask = row[2]
            ex2_ts = row[3]
            spread_ts = ex1_ts if ex1_ts > ex2_ts else ex2_ts
            sql_value1 = "('{0}','{1}','{2}',{3},{4})".format(
                symbol, ex1, ex2, ex1_bid - ex2_ask, spread_ts)
            sql_value2 = ",('{0}','{1}','{2}',{3},{4})".format(
                symbol, ex2, ex1, ex2_bid - ex1_ask, spread_ts)
            if c == 0:
                sql_insert = sql_insert + sql_value1 + sql_value2
            else:
                sql_insert = sql_insert + "," + sql_value1 + sql_value2
            c = c + 1

            record1 = TupleRecord(schema=topic.record_schema)
            record1.values = [symbol, ex1, ex2, ex1_bid - ex2_ask, spread_ts]
            record1.shard_id = shards[c % len(shards)].shard_id
            out_records.append(record1)
            c = c + 1

            record2 = TupleRecord(schema=topic.record_schema)
            record2.values = [symbol, ex2, ex1, ex2_bid - ex1_ask, spread_ts]
            record2.shard_id = shards[c % len(shards)].shard_id
            out_records.append(record2)

        sql_insert = sql_insert + ";"
        logger.debug("sql_insert={0}".format(sql_insert))
        count_write = cursor_write.execute(sql_insert)
        logger.debug(count_write)
        conn_write.commit()

        logger.debug("datahub.put_records={0}".format(len(records)))
        datahub.put_records(project_name, topic_name, records)
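
Building SQL with format() as above breaks as soon as a symbol contains a quote. A parameterized sketch of the same SELECT (assuming a DB-API driver such as pymysql, where %s placeholders are bound by execute):

sql_select = ("SELECT f_ex_id, f_bid, f_ask, f_ts FROM t_ticker_current "
              "WHERE f_symbol = %s AND f_ex_id != %s "
              "AND f_ts > (UNIX_TIMESTAMP() - 30) * 1000")
cursor_read = conn_read.cursor()
# The driver escapes the bound values, so symbol / ex1 cannot break out
# of the statement.
cursor_read.execute(sql_select, (symbol, ex1))
rows = cursor_read.fetchall()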