def test__repr__(loop):
    cl = Elasticsearch([], loop=loop)
    assert repr(cl) == '<Elasticsearch [<Transport []>]>'

    cl = Elasticsearch(['localhost:9200'], loop=loop)
    assert repr(cl) == (
        "<Elasticsearch [<Transport ["
        "TCPEndpoint(scheme='http', host='localhost', port=9200)"
        "]>]>")
async def add_elasticsearch_connection(self):
    elasticsearch_host = None
    elasticsearch_port = None
    try:
        elasticsearch_host = os.getenv("ELASTICSEARCH_HOST",
                                       "redis.dev.muchneededllc.com")
        elasticsearch_port = str(os.getenv("ELASTICSEARCH_PORT", 9200))
    except OSError as e:
        logger.error(
            "Couldn't get environment variables for elasticsearch. " + str(e))
        exit(1)

    try:
        if self._loop is not None:
            address = ':'.join([elasticsearch_host, elasticsearch_port])
            self._elasticsearch_client = Elasticsearch([address],
                                                       loop=self._loop)
            logger.info(self._elasticsearch_client)
            self.elasticsearch_conn = True
            logger.debug("Created Elasticsearch client.")
        else:
            logger.error(
                "Couldn't create Elasticsearch client because the loop "
                "hasn't been set.")
    except Exception as e:
        logger.error("Couldn't open Elasticsearch.")
        raise Exception(e)
async def analyze(text, debug=False):
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    client = IndicesClient(es)
    index_name = await get_recent_index(INDEX_DOCUMENTS)
    result = await client.analyze(index=index_name, analyzer="korean",
                                  body={"text": text})
    ret = []
    # Keep only substantive noun morphemes and foreign words; skip verbs,
    # particles, endings, affixes and punctuation tags.
    skip_tags = [
        "COMPOUND", "EOJEOL", "INFLECT", "VV", "VA", "VX", "VCP", "VCN",
        "NNB", "E", "JKS", "JKC", "JKG", "JKO", "JKB", "JKV", "JKQ", "JX",
        "JC", "EP", "EF", "EC", "ETN", "ETM", "XPN", "XSN", "XSV", "XSA",
        "SF", "SE", "SS", "SN", "SP", "SO", "SW", "SH"
    ]
    for x in result['tokens']:
        token = x["token"]
        token_type = x["type"]
        if token_type not in skip_tags:
            if debug:
                print("{}==>{}/{}".format(text, token, token_type))
            if token_type in ["VV", "VA"]:
                # Restore the dictionary form ("-다") of verbs/adjectives.
                ret.append(token[:token.find("/V")] + "다")
            else:
                ret.append(token)
        else:
            if debug:
                print("XXX {}==>{}/{}".format(text, token, token_type))
    return "".join(ret)
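# Illustrative only: a minimal driver for analyze() above, assuming the
# module-level es_ip/es_port and the "korean" analyzer exist as in that
# snippet. The sample sentence and entry point are hypothetical.
import asyncio

async def _demo_analyze():
    normalized = await analyze("오늘 날씨가 좋다", debug=True)
    print(normalized)

asyncio.get_event_loop().run_until_complete(_demo_analyze())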
async def related_word_extractor(parent_docid, doc_datetime, term, debug=False):
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    # Match the parent document by id and the query term, then collect
    # highlighted fragments from the title and body.
    highlight_req = {
        "_source": [""],
        "query": {
            "bool": {
                "filter": [
                    {"term": {"_id": parent_docid}},
                    {"query_string": {
                        "query": term,
                        "fields": ["doc_title", "doc_content"],
                        "default_operator": "AND"
                    }}
                ]
            }
        },
        "highlight": {
            "fields": {
                "_all": {},
                "doc_title": {
                    "fragment_size": 30,
                    "number_of_fragments": 1,
                    "fragmenter": "simple"
                },
                "doc_content": {
                    "fragment_size": 30,
                    "number_of_fragments": 3,
                    "fragmenter": "simple"
                }
            }
        }
    }
    # Daily indices are named "<INDEX_DOCUMENTS>-YYYY.MM.DD".
    index_name = INDEX_DOCUMENTS + "-" + re.sub(
        "-", ".", doc_datetime[:doc_datetime.find("T")])
    result = await es.search(index=index_name, doc_type=TYPE_DOC,
                             body=highlight_req)
    related = []
    if result['hits']['total'] > 0:
        title_fragments = []
        content_fragments = []
        for a in result['hits']['hits']:
            if 'doc_title' in a['highlight']:
                title_fragments = [fragment
                                   for fragment in a['highlight']['doc_title']]
            if 'doc_content' in a['highlight']:
                content_fragments = [fragment
                                     for fragment in a['highlight']['doc_content']]
        for f in (title_fragments + content_fragments):
            related += await get_close_word(f, debug)
    es.close()
    # De-duplicate while preserving first-seen order; drop single characters.
    return list(filter(lambda x: len(x) > 1,
                       list(sorted(set(related), key=lambda x: related.index(x)))))
def client(es_params, index, loop):
    client = Elasticsearch([{'host': es_params['host']}], loop=loop)
    try:
        loop.run_until_complete(client.delete(index, '', ''))
    except NotFoundError:
        pass
    yield client
    client.close()
def client(es_params, loop):
    with closing(Elasticsearch([{'host': es_params['host']}], loop=loop)) as c:
        INDEX = 'test_elasticsearch'
        try:
            loop.run_until_complete(c.delete(INDEX, '', ''))
        except NotFoundError:
            pass
        yield c
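# Hypothetical pytest test consuming the client fixture above; the document
# body and id are illustrative, and the aioes calls mirror elasticsearch-py.
def test_index_roundtrip(client, loop):
    async def go():
        await client.index('test_elasticsearch', 'doc',
                           {'title': 'hello'}, id='1', refresh=True)
        data = await client.get('test_elasticsearch', '1')
        assert data['_source']['title'] == 'hello'
    loop.run_until_complete(go())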
def setUp(self):
    self._index = 'test_elasticsearch'
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
    self.cl = Elasticsearch([{'host': 'localhost'}], loop=self.loop)
    self.addCleanup(self.cl.close)
    try:
        self.loop.run_until_complete(self.cl.delete(self._index, '', ''))
    except NotFoundError:
        pass
async def get_recent_index(index):
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    cat2es = CatClient(es)
    # The cat API replaces a raw "GET _cat/indices/<index>?h=index" call.
    result = await cat2es.indices(index, h="index")
    es_result = result
    # Daily index names sort lexicographically, so the newest comes first.
    idx_list = sorted([idx for idx in es_result.split("\n")], reverse=True)
    if len(es_result) > 0:
        return idx_list[0].strip()
    else:
        raise EsError
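# Sketch: how get_recent_index() is typically combined with a search, under
# the same es_ip/es_port/INDEX_DOCUMENTS assumptions as above; the function
# name is hypothetical.
async def search_recent(body):
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    index_name = await get_recent_index(INDEX_DOCUMENTS)
    try:
        return await es.search(index=index_name, body=body)
    finally:
        es.close()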
def setUp(self):
    self._index = 'elastic_search'
    self.repo_name = 'test_repo'
    self.repo_path = self._create_temp_dir()
    # otherwise elasticsearch can't access it
    os.chmod(self.repo_path, 0o777)
    self.snapshot_name = 'test_snapshot'
    self.loop = asyncio.new_event_loop()
    asyncio.set_event_loop(None)
    self.cl = Elasticsearch([{'host': 'localhost'}], loop=self.loop)
    self.addCleanup(self.cl.close)
    try:
        self.loop.run_until_complete(
            self.cl.delete(self._index, refresh=True))
    except NotFoundError:
        pass
def client(es_params, loop, repo_name, snapshot_name):
    client = Elasticsearch([{'host': es_params['host']}], loop=loop)
    try:
        loop.run_until_complete(client.delete(INDEX, '', ''))
    except NotFoundError:
        pass
    yield client
    # cleaning up just in case
    try:
        loop.run_until_complete(
            client.snapshot.delete(repo_name, snapshot_name))
    except NotFoundError:
        pass
    try:
        loop.run_until_complete(client.snapshot.delete_repository(repo_name))
    except NotFoundError:
        pass
    client.close()
async def update_emotions(index_name, emotion_id, pos_neg):
    logger.debug("[update] %s, %s" % (emotion_id, pos_neg))
    # Painless script: overwrite emotion_type and stamp the update time.
    request_body = {
        "script": {
            "inline": "ctx._source.put('emotion_type', params.emotion_type); "
                      "ctx._source.put('upd_datetime', params.upd_datetime);",
            "lang": "painless",
            "params": {
                "emotion_type": pos_neg,
                "upd_datetime": str(dt.now().strftime('%Y-%m-%dT%H:%M:%S'))
            }
        }
    }
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    r = await es.update(index=index_name, doc_type=TYPE_DOC, id=emotion_id,
                        body=request_body, refresh=True)
async def find_to_which_index(topic_id):
    which_index = None
    request_body = {"query": {"term": {"_id": topic_id}}}
    es = Elasticsearch(['%s:%d' % (es_ip, es_port)])
    es_result = await es.search(TOPICS_TO_SEARCH, doc_type="doc",
                                body=request_body)
    if 'hits' in es_result:
        if es_result['hits']['total'] > 0:
            which_index = es_result['hits']['hits'][0]['_index']
        else:
            # No hits: fall back to the most recent index.
            which_index = await get_recent_index(TOPICS_TO_SEARCH)
    else:
        raise EsError
    return which_index
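# Sketch combining the two helpers above: resolve the daily index that holds
# the document, then run the scripted update. The function name, id and label
# values are made up for illustration.
async def set_emotion(topic_id, label):
    index_name = await find_to_which_index(topic_id)
    await update_emotions(index_name, topic_id, label)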
def insertTopics(es_result):
    if 'hits' in es_result:
        # Build update bulks from the hits whose topic_class is VV and
        # gather them concurrently.
        fts = [makeUpdateBulks(x) for x in es_result['hits']['hits']]
        some_bulks = yield from asyncio.gather(*fts)

        # BULK INSERT
        from elasticsearch import Elasticsearch
        es_client = Elasticsearch(":".join([es_ip, str(es_port)]))
        bulk_result = 0
        # Every transient failure gets the same treatment: log, sleep 10s,
        # retry up to five times.
        retriable = (EsError, exceptions.ConnectionTimeout,
                     aiohttp.client_exceptions.ClientConnectorError,
                     OSError, urllib3.exceptions.NewConnectionError)
        try:
            bulk_result += helpers.bulk(
                es_client,
                list(filter(lambda x: x is not None, some_bulks)),
                refresh=True)[0]
        except retriable as e:
            retry = 0
            logger.error("[insertTopics] %s (retry:%d)" % (str(e), retry))
            while retry <= 5:
                retry += 1
                print("Sleeping 10 seconds, then trying again!\n")
                time.sleep(10)
                try:
                    print("Indexing retry attempt {0}..".format(retry))
                    bulk_result += helpers.bulk(
                        es_client,
                        list(filter(lambda x: x is not None, some_bulks)),
                        refresh=True)[0]
                    break
                except retriable as e:
                    logger.error("[insertTopics] %s (retry:%d)"
                                 % (str(e), retry))
                    continue
        except Exception:
            ex = traceback.format_exc()
            logger.error("[insertTopics] unknown error. Traceback >> %s " % ex)
        logger.debug("%d are successfully inserted." % bulk_result)
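# The log/sleep/retry pattern above recurs in insert_topics() and
# insert_emotions() below; a minimal sketch of a shared helper that could
# replace those loops. The exception tuple and the 5 x 10s budget mirror the
# existing code; the helper name and keyword arguments are hypothetical.
def bulk_with_retry(es_client, actions, max_retries=5, wait=10, **kwargs):
    retriable = (EsError, exceptions.ConnectionTimeout,
                 aiohttp.client_exceptions.ClientConnectorError,
                 OSError, urllib3.exceptions.NewConnectionError)
    retry = 0
    while True:
        try:
            # helpers.bulk returns (success_count, errors); keep the count.
            return helpers.bulk(es_client, actions, **kwargs)[0]
        except retriable as e:
            retry += 1
            logger.error("[bulk_with_retry] %s (retry:%d)" % (str(e), retry))
            if retry > max_retries:
                raise
            time.sleep(wait)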
from aioes import Elasticsearch

from .settings import ELASTICSEARCH_ENDPOINTS

elastic = Elasticsearch(ELASTICSEARCH_ENDPOINTS)
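# Illustrative use of the module-level client above; aioes exposes coroutine
# methods mirroring elasticsearch-py, so callers might do e.g.:
async def cluster_info():
    return await elastic.info()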
def conn(self):
    if self._conn is None:
        self._conn = Elasticsearch(**self.settings['connection_settings'])
    return self._conn
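# Hypothetical context for the lazy accessor above: a holder class that
# caches the client and can release it. The class name, release() helper and
# settings shape (e.g. {'connection_settings': {'endpoints': [...]}}) are
# assumptions for illustration.
class EsHolder:
    def __init__(self, settings):
        self.settings = settings
        self._conn = None

    @property
    def conn(self):
        # Build the client on first access, then reuse it.
        if self._conn is None:
            self._conn = Elasticsearch(**self.settings['connection_settings'])
        return self._conn

    def release(self):
        if self._conn is not None:
            self._conn.close()
            self._conn = None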
def insert_topics(data):
    some_bulks = ''
    bulk_result = 0
    try:
        result = teaclient.request(data)
        root = et.fromstring(result)
        status = root.findall(
            "./results/result[@name='status']")[0].text if len(
                root.findall("./results/result[@name='status']")) > 0 else ''
        if status == "success" and len(
                root.findall("./results/result[@name='keywords']")) > 0:
            result_scd = root.findall(
                "./results/result[@name='keywords']")[0].text
            terms = ""
            verbs = ""
            # Each line carries "token:count" pairs joined by '^',
            # e.g. "하늘:387^테스트:14^도움:11".
            for line in result_scd.split("\n"):
                if line.startswith("<TERMS>"):
                    terms = line.replace("<TERMS>", "")
                elif line.startswith("<VERBS>"):
                    verbs = line.replace("<VERBS>", "")
            terms = [('NN', term)
                     for term in terms.split(teaclient.ITEM_DELIMITER)]
            verbs = [('VV', verb)
                     for verb in verbs.split(teaclient.ITEM_DELIMITER)]

            from elasticsearch import Elasticsearch
            es_client = Elasticsearch(":".join([es_ip, str(es_port)]))
            # All transient failures share one log/sleep/retry loop.
            retriable = (EsError, exceptions.ConnectionTimeout,
                         aiohttp.client_exceptions.ClientConnectorError,
                         OSError, urllib3.exceptions.NewConnectionError)
            try:
                fts = [make_bulk(t, data) for t in (terms + verbs)]
                some_bulks = yield from asyncio.gather(*fts)
                bulk_result += helpers.bulk(
                    es_client,
                    list(filter(lambda x: x and len(x) > 0, some_bulks)),
                    refresh=True)[0]
            except retriable as e:
                retry = 0
                logger.error("[insert_topics] %s (retry:%d)" % (str(e), retry))
                while retry <= 5:
                    retry += 1
                    print("Sleeping 10 seconds, then trying again!\n")
                    time.sleep(10)
                    try:
                        print("Indexing retry attempt {0}..".format(retry))
                        bulk_result += helpers.bulk(
                            es_client,
                            list(filter(lambda x: x and len(x) > 0,
                                        some_bulks)),
                            refresh=True)[0]
                        break
                    except retriable as e:
                        logger.error("[insert_topics] %s (retry:%d)"
                                     % (str(e), retry))
                        continue
            except Exception:
                ex = traceback.format_exc()
                logger.error(
                    "[insert_topics] unknown error. Traceback >> %s " % ex)
            logger.debug("%d are successfully inserted." % bulk_result)
    except ParseError as xmlerror:
        logger.error("[insert_topics] TeaClient failed. (%s)" % str(xmlerror))
        logger.error("==============> teaclient's xml response : %s" % result)
def insert_emotions(project_seq, document_data):
    some_bulks = ''
    bulk_result = 0
    try:
        bica_ip, bica_port, concept_id = mariadbclient.get_bica_info(
            project_seq)
        bica_result = request2bica(
            bica_ip, bica_port, concept_id,
            document_data['_source']['doc_title'] + ' ' +
            document_data['_source']['doc_content'])
        if bica_result:
            json_result = json.loads(bica_result)
            from elasticsearch import Elasticsearch
            es_client = Elasticsearch(":".join([es_ip, str(es_port)]))
            # All transient failures share one log/sleep/retry loop.
            retriable = (exceptions.ConnectionTimeout,
                         aiohttp.client_exceptions.ClientConnectorError,
                         OSError, urllib3.exceptions.NewConnectionError)
            try:
                fts = [make_bulk(document_data, r)
                       for r in json_result['result']]
                some_bulks = yield from asyncio.gather(*fts)
                bulk_result += helpers.bulk(
                    es_client,
                    list(filter(lambda x: x and len(x) > 0, some_bulks)))[0]
            except retriable as e:
                retry = 0
                logger.error("[insert_emotions] %s (retry:%d)"
                             % (str(e), retry))
                while retry <= 5:
                    retry += 1
                    print("Sleeping 10 seconds, then trying again!\n")
                    time.sleep(10)
                    try:
                        print("Indexing retry attempt {0}..".format(retry))
                        bulk_result += helpers.bulk(
                            es_client,
                            list(filter(lambda x: x and len(x) > 0,
                                        some_bulks)))[0]
                        break
                    except retriable as e:
                        logger.error("[insert_emotions] %s (retry:%d)"
                                     % (str(e), retry))
                        continue
            except Exception:
                ex = traceback.format_exc()
                logger.error(
                    "[insert_emotions] unknown error. Traceback >> %s " % ex)
            logger.debug("%d are successfully inserted." % bulk_result)
    except Exception:
        ex = traceback.format_exc()
        logger.error("[insert_emotions] unknown error. Traceback >> %s " % ex)
def insert_topics(data):
    some_bulks = ''
    bulk_result = 0
    try:
        result = teaclient.request(data)
        root = et.fromstring(result)
        status = root.findall(
            "./results/result[@name='status']")[0].text if len(
                root.findall("./results/result[@name='status']")) > 0 else ''
        if status == "success" and len(
                root.findall("./results/result[@name='keywords']")) > 0:
            result_scd = root.findall(
                "./results/result[@name='keywords']")[0].text
            terms = ""
            verbs = ""
            # Each line carries "token:count" pairs joined by '^',
            # e.g. "하늘:387^테스트:14^도움:11".
            for line in result_scd.split("\n"):
                if line.startswith("<TERMS>"):
                    terms = line.replace("<TERMS>", "")
                elif line.startswith("<VERBS>"):
                    verbs = line.replace("<VERBS>", "")
            terms = [('NN', term)
                     for term in terms.split(teaclient.ITEM_DELIMITER)]
            verbs = [('VV', verb)
                     for verb in verbs.split(teaclient.ITEM_DELIMITER)]

            # 2018.03.26: a topic that appears in both terms and verbs is
            # treated as a noun (NN).
            new_dict = {}
            for cl, topic in terms + verbs:
                if topic in new_dict:
                    new_dict[topic]['cnt'] += 1
                else:
                    new_dict[topic] = {'cnt': 1, 'topic_class': cl}
            new_arr = []
            for topic, info in new_dict.items():
                if info['cnt'] > 1 or info['topic_class'] == 'NN':
                    new_arr.append(('NN', topic))
                else:
                    new_arr.append(('VV', topic))

            from elasticsearch import Elasticsearch
            es_client = Elasticsearch(":".join([es_ip, str(es_port)]))
            # All transient failures share one log/sleep/retry loop.
            retriable = (EsError, exceptions.ConnectionTimeout,
                         aiohttp.client_exceptions.ClientConnectorError,
                         OSError, urllib3.exceptions.NewConnectionError)
            try:
                # Each make_bulk() call yields an update action pair such as
                # ({'update': {'_index': 'topics-2018.01.01', '_type': 'doc', ...}},
                #  {'topic': '증권', 'topic_id': ...}).
                fts = [make_bulk(t, data) for t in new_arr]
                some_bulks = yield from asyncio.gather(*fts)
                bulk_result += helpers.bulk(
                    es_client,
                    list(filter(lambda x: x and len(x) > 0, some_bulks)),
                    refresh=True)[0]
            except retriable as e:
                retry = 0
                logger.error("[insert_topics] %s (retry:%d)" % (str(e), retry))
                while retry <= 5:
                    retry += 1
                    print("Sleeping 10 seconds, then trying again!\n")
                    time.sleep(10)
                    try:
                        print("Indexing retry attempt {0}..".format(retry))
                        bulk_result += helpers.bulk(
                            es_client,
                            list(filter(lambda x: x and len(x) > 0,
                                        some_bulks)),
                            refresh=True)[0]
                        break
                    except retriable as e:
                        logger.error("[insert_topics] %s (retry:%d)"
                                     % (str(e), retry))
                        continue
            except Exception:
                ex = traceback.format_exc()
                logger.error(
                    "[insert_topics] unknown error. Traceback >> %s " % ex)
            logger.debug("%d are successfully inserted." % bulk_result)
    except ParseError as xmlerror:
        logger.error("[insert_topics] TeaClient failed. (%s)" % str(xmlerror))
        logger.error("==============> teaclient's xml response : %s" % result)