def worker(args, routing):
    """Scan one shard of the index and push any remaining batched docs.

    Builds the scan query — either the full query (``args.q``) or the
    incremental one (``args.iq`` with ``[REFRESH_DATE]`` substituted by
    ``args.id``) — walks the shard via ``scan_shard`` with ``processDoc``
    as the per-document callback, then flushes whatever JSON is still
    buffered with a final ``batchPush``.

    Parameters:
        args: parsed CLI namespace; this function reads ``.q``, ``.iq``,
            ``.id`` and ``.index`` directly, and forwards ``args`` to
            ``init_es``/``scan_shard``/``batchPush``.
        routing: routing value selecting the shard to scan.
    """
    global pool  # worker pool owned by the parent process; terminated on fatal error
    _es = init_es(args)
    # Shared accumulation buffer + lock — scan_shard/processDoc append
    # serialized documents here across the scan (multiprocessing-safe).
    currentjson = Value(ctypes.c_char_p, b"")
    lock = Lock()
    # Check if we have an incremental refresh or not
    try:
        if args.iq == '':
            query = json.loads(args.q)
        else:
            query = json.loads(args.iq.replace("[REFRESH_DATE]", args.id))
    except Exception as error:
        print("Error at worker, loading query (worker): " + format(error))
        pool.terminate()
        # Bug fix: pool.terminate() does not stop THIS process, so the
        # original code fell through to scan_shard with `query` unbound,
        # raising NameError on top of the real failure. Bail out instead.
        return
    scan_shard(_es, args.index, '', query, routing, processDoc, args, currentjson, lock)
    # Last push: flush anything still buffered after the scan completes.
    if currentjson.value != b"":
        batchPush(currentjson.value, args)
from app.utils import parse_args, get_arg_parser, init_es
from app.shards import scan_shard, get_shards_to_routing

if __name__ == '__main__':
    # Entry point: enumerate the shards of the target index and scan each
    # one sequentially, printing every document as it arrives.
    parser = get_arg_parser()
    args = parse_args(parser)
    es = init_es(args)
    # Map: shard id -> a routing value whose hash lands on that shard.
    shards_to_routing = get_shards_to_routing(es, args.index, args.doc_type)
    # The match_all query is loop-invariant — build it once instead of
    # recreating the same dict on every iteration (also dropped the
    # unused `jobs = []` local the original carried).
    query = {"query": {"match_all": {}}}
    for shard, routing in shards_to_routing.items():
        print(f"shard: {shard}, routing: {routing}")
        scan_shard(es, args.index, args.doc_type, query, routing, lambda doc: print(doc))
def worker(args, routing):
    """Scan a single shard of ``args.index`` with a match-all query.

    Opens its own Elasticsearch client via ``init_es`` and delegates the
    walk to ``scan_shard``; the per-document callback is an identity
    passthrough, so the call exists for the side effect of the scan.

    Parameters:
        args: parsed CLI namespace (``.index`` and ``.doc_type`` are read
            here; ``init_es`` reads whatever connection settings it needs).
        routing: routing value selecting the shard to scan.
    """
    client = init_es(args)
    match_all = {"query": {"match_all": {}}}
    scan_shard(client, args.index, args.doc_type, match_all, routing, lambda d: d)