def setup_options():
    """Build the SMTP mail sender from settings and register it on opt."""
    sender = EmailSender(
        host=settings.MAIL_HOST,
        port=settings.MAIL_PORT,
        username=settings.MAIL_USERNAME,
        passwd=settings.MAIL_PASSWD,
    )
    opt.set_option('email_sender', sender)
async def calculate_stats(task_q):
    """Flush this process's per-second counters into the shared globals.

    Loops once a second, moving ``n_current_docus`` / ``n_err_docus`` into
    the shared ``global_*`` values and resetting the local counters.
    Exits once the workers have finished AND the local counters are drained.
    """
    while True:
        workers_done = opt.post_data_is_set.is_set()
        pending = opt.n_current_docus + opt.n_err_docus
        if workers_done and pending == 0:
            break
        opt.global_n_docus.value += opt.n_current_docus
        opt.global_n_err_docus.value += opt.n_err_docus
        opt.set_option('n_current_docus', 0)
        opt.set_option('n_err_docus', 0)
        opt.logger.debug('submit stats to manager, current queue %s/100', task_q.qsize())
        await asyncio.sleep(1)
def setup_args():
    """Mirror every setting onto opt, then register and parse CLI flags."""
    for key, value in settings.__dict__.items():
        opt.set_option(key, value)
    # (flags, keyword arguments) for each CLI option.
    arg_specs = [
        (('-t', '--tasks'), {'default': '', 'help': 'Tasks you want to run'}),
        (('-e', '--exclude-tasks'), {'default': '', 'help': 'Tasks you do not want to run'}),
        (('--debug',), {'action': 'store_true', 'default': False}),
        (('--smtp_host',), {'type': str, 'default': None}),
    ]
    for flags, kwargs in arg_specs:
        opt.add_argument(*flags, **kwargs)
    opt.parse_args()
async def runner():
    """Top-level async driver: create the index, then fan out workers.

    Starts ``n_conn`` posting workers, one task producer, and one stats
    coroutine. Signals the workers when the producer is drained, signals
    the stats loop when the workers finish, then waits for stats to exit.
    """
    opt.logger.debug('runner')
    opt.set_option('producer_is_set', Event())
    opt.set_option('post_data_is_set', Event())
    queue = asyncio.Queue(maxsize=100)
    async with aiohttp.ClientSession() as session:
        await create_index(session)
        workers = []
        for _ in range(opt.n_conn):
            workers.append(asyncio.ensure_future(post_data(session, queue)))
        producer = asyncio.ensure_future(produce_tasks(queue))
        stats = asyncio.ensure_future(calculate_stats(queue))
        await producer
        # Tell workers there will be no more tasks.
        opt.producer_is_set.set()
        await asyncio.wait(workers)
        # Tell the stats loop the workers are done.
        opt.post_data_is_set.set()
        await stats
def calculate_global_stats():
    """Log global throughput (docs/sec and errors/sec) roughly every 10s.

    Polls the shared counters until ``global_is_done`` is set. Each report
    window reads and resets a counter under its lock, divides by the elapsed
    time since the previous report, and logs both rates.
    """
    last_t = time.time()
    while not opt.global_is_done.value:
        t = time.time()
        if t - last_t < 10:
            time.sleep(1)
            continue
        last_t = t
        # `with` guarantees the lock is released even if the division
        # raises (the original acquire()/release() pair would leave the
        # lock held forever and deadlock the worker processes).
        with opt.docu_lock:
            speed = opt.global_n_docus.value / (t - opt.global_last_time)
            opt.global_n_docus.value = 0
        with opt.err_docu_lock:
            err_speed = opt.global_n_err_docus.value / (t - opt.global_last_time)
            opt.global_n_err_docus.value = 0
        opt.set_option('global_last_time', t)
        opt.logger.info('%s docus/sec, %s err/sec', round(speed, 1), round(err_speed, 1))
async def post_data(s, task_q):
    """Worker coroutine: POST documents from ``task_q`` until drained.

    Counts successful inserts in ``n_current_docus`` and failures in
    ``n_err_docus``. 503 (nginx) and 429 (ES queue overflow) responses are
    counted as errors but not logged; other failures are debug-logged.
    """
    opt.logger.debug('post_data')
    url = '{}{}/{}'.format(opt.addr, opt.index, opt.type)
    while not (opt.producer_is_set.is_set() and task_q.empty()):
        data = await task_q.get()
        # Pre-bind so the except-handler below never hits a NameError when
        # the request fails before a response exists (connection refused,
        # DNS error, etc.) — the original referenced unbound `resp`/`cont`.
        resp = None
        cont = ''
        try:
            async with s.post(url, data=data, headers=HEADERS, timeout=3) as resp:
                cont = await resp.text()
                assert resp.status == 201
        except TimeoutError:
            opt.set_option('n_err_docus', opt.n_err_docus + 1)
            continue
        except Exception:
            opt.set_option('n_err_docus', opt.n_err_docus + 1)
            if resp is not None and (resp.status == 503  # Nginx load balance error
                                     or resp.status == 429):  # elasticsearch queue overflow
                continue
            opt.logger.debug('Error for data {}, resp: {}'.format(data, cont), exc_info=True)
        else:
            opt.set_option('n_current_docus', opt.n_current_docus + 1)
def setup_settings():
    """Initialise shared runtime state: locks, counters, executor, logger."""
    # Locks must exist before the Values that are built on top of them.
    opt.set_option('docu_lock', RLock())
    opt.set_option('err_docu_lock', RLock())
    opt.set_option('executor', ProcessPoolExecutor(max_workers=opt.n_procs + 1))
    opt.set_option('global_n_docus', Value(c_int32, 0, lock=opt.docu_lock))
    opt.set_option('global_n_err_docus', Value(c_int32, 0, lock=opt.err_docu_lock))
    opt.set_option('global_is_done', Value(c_int32, 0))
    opt.set_option('global_last_time', time.time())
    opt.set_option('logger', setup_logger('es_benchmark'))
    if opt.debug:
        opt.logger.setLevel(logging.DEBUG)
    # Per-process counters start empty.
    for counter in ('n_current_docus', 'n_err_docus'):
        opt.set_option(counter, 0)
    opt.set_option('last_time', time.time())
    opt.logger.info(f"set elasticsearch: {opt.addr}")
    opt.logger.info(
        'will insert %s documents with %s bytes for %s connections, %s processes',
        opt.n_docus * opt.n_procs, opt.msg_size, opt.n_conn * opt.n_procs,
        opt.n_procs)