def worker_query(self, worker_id):
    """Worker loop: pull workload items off the worker queue, execute each
    against a fresh session, and push a timing/result record onto the
    report queue. Runs forever; `worker_queue.get()` blocks between items.
    """
    session = query.new_session()
    while True:
        # Blocking get — the worker simply waits until an item arrives.
        item = self.worker_queue.get()
        self.logger.info("idle workers: {}, queue lengths: {}".format(
            self.worker_pool.free_count(), self.worker_queue.qsize()))
        self.logger.info("worker {} is processing roxie query {}".format(
            worker_id, item.wid))
        self.logger.info("{} {} {} {} {}".format(
            item.wid, item.endpoint, item.query_name, item.query_key,
            item.key))
        started = time.time()
        success, output_size, status_code, exception_description = \
            query.execute_workload_item(session, item,
                                        timeout=self.query_timeout)
        finished = time.time()
        # One record per executed item; consumed by the reporter side.
        self.report_queue.put({
            "item": item.wid,
            "queueTimestamp": item.queue_timestamp,
            "startTimestamp": started,
            "finishTimestamp": finished,
            "success": success,
            "size": output_size,
            "status": status_code,
            "exception": exception_description,
        })
def http_query(ctx, host, name, input):
    """Run one roxie query named *name* against *host* with a 10s timeout.

    `input` is a two-element sequence — presumably (query_key, key); verify
    against the caller. NOTE(review): the parameter shadows the `input`
    builtin; kept as-is for interface compatibility.
    """
    query_key, key = input[0], input[1]
    session = roxie_query.new_session()
    return roxie_query.run_query(session, name, host, query_key, key,
                                 timeout=10)
def worker(self, worker_id):
    """Worker loop with optional manual endpoint routing.

    Pulls workload items off the worker queue, optionally reassigns the
    item's endpoint via `_select_endpoint`, executes the item, and reports
    timing/result through the benchmark reporter protocol. Runs forever.
    """
    session = query.new_session()
    reporter_procotol = BenchmarkReporterProtocol(worker_id, self.sender)
    while True:
        worker_item = self.worker_queue.get()
        self.logger.info("idle workers: {}, queue lengths: {}".format(
            self.worker_pool.free_count(), self.worker_queue.qsize()))
        self.logger.info("worker {} is processing roxie query {}".format(
            worker_id, worker_item.wid))
        if self.manual_routing_enabled:
            try:
                assigned_endpoint = self._select_endpoint(
                    worker_item.query_name)
                self.logger.info('from {} to {}'.format(
                    worker_item.endpoint, assigned_endpoint))
                worker_item.endpoint = assigned_endpoint
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); routing failure is best-effort,
            # so log and fall through to the item's original endpoint.
            except Exception:
                self.logger.exception('unable to select endpoint')
        # BUG FIX: the original passed four positional args to logger.info;
        # logging treats args after the first as %-format arguments, and the
        # endpoint string has no placeholders, so the call produced a
        # formatting error instead of the intended log line.
        self.logger.info("{} {} {} {}".format(
            worker_item.endpoint, worker_item.query_name,
            worker_item.query_key, worker_item.key))
        start_timestamp = time.time()
        success, output_size, status_code, exception_description = \
            query.execute_workload_item(session, worker_item,
                                        timeout=self.query_timeout)
        finish_timestamp = time.time()
        reporter_procotol.report(worker_item.wid,
                                 worker_item.queue_timestamp,
                                 start_timestamp, finish_timestamp,
                                 success, output_size, status_code,
                                 exception_description)
def runner(self):
    """Request-driven runner: poll `next_request()` until a stop request.

    empty -> sleep 1s and poll again; stop -> exit the loop;
    run -> execute the workload and yield to other greenlets.
    """
    session = query.new_session()
    keep_going = True
    while keep_going:
        request = self.next_request()
        if request.xtype == RequestType.stop:
            keep_going = False
        elif request.xtype == RequestType.empty:
            gevent.sleep(1)
        elif request.xtype == RequestType.run:
            self.count += 1
            query.execute_workload_item(session, request.workload)
            # zero-length sleep just yields; the node_bucket is expected
            # to do the blocking
            gevent.sleep(0)
def worker(index):
    """Drain the shared `worker_queue`; record keys of successful items.

    A queue `get` that times out (10s with nothing arriving) is taken to
    mean the work is exhausted, and the worker exits. An item is counted
    as a hit when it succeeded and returned more than 100 bytes/rows.
    """
    session = query.new_session()
    completed = False
    while not completed:
        # BUG FIX: the original wrapped the whole body in a bare `except:`,
        # so a failure in execute/append was silently reported as "done all
        # the queries". Only the timed-out get means completion.
        try:
            worker_item = worker_queue.get(timeout=10)
        except Exception:
            print('worker {} has done all the queries'.format(index))
            completed = True
            continue
        success, output_size = query.execute_workload_item(
            session, worker_item, timeout=10)
        # `with` releases the lock even if append raises (the original
        # acquire/release pair leaked the lock on exception).
        with mylock:
            if success and output_size > 100:
                results.append(worker_item.key)
def worker(index):
    """Drain the shared `worker_queue`, recording per-item success flags.

    A queue `get` that times out (1s with nothing arriving) is taken to
    mean the work is exhausted, and the worker exits. `results[wid]` is
    True when the item succeeded and produced more than 100 bytes/rows.
    """
    session = query.new_session()
    completed = False
    while not completed:
        # BUG FIX: the original `try` covered execute and the results
        # update too, so any error there was misreported as completion.
        try:
            worker_item = worker_queue.get(timeout=1)
        except Exception:
            print('worker {} has done all the queries'.format(index))
            completed = True
            continue
        # status_code / exception_description are unpacked but unused here;
        # kept named for readability.
        success, output_size, status_code, exception_description = \
            query.execute_workload_item(session, worker_item, timeout=120)
        # `with` releases the lock even if the dict update raises (the
        # original acquire/release pair leaked the lock on exception).
        with mylock:
            results[worker_item.wid] = success and output_size > 100
def worker(self, worker_id):
    """Worker loop: execute queue items and report timings.

    Pulls workload items off the worker queue forever, runs each one
    against a fresh session, and forwards the outcome through the
    benchmark reporter protocol.
    """
    session = query.new_session()
    reporter = BenchmarkReporterProtocol(worker_id, self.sender)
    while True:
        item = self.worker_queue.get()
        self.logger.info("idle workers: {}, queue lengths: {}".format(
            self.worker_pool.free_count(), self.worker_queue.qsize()))
        self.logger.info("worker {} is processing roxie query {}".format(
            worker_id, item.wid))
        started = time.time()
        outcome = query.execute_workload_item(session, item,
                                              timeout=self.query_timeout)
        finished = time.time()
        success, output_size, status_code, exception_description = outcome
        reporter.report(item.wid, item.queue_timestamp, started, finished,
                        success, output_size, status_code,
                        exception_description)
def runner(self):
    """Consume work from the node handler until it is completed and empty.

    Waits up to 1s for each request; a `None` result means nothing was
    available, so the greenlet yields and re-checks the exit condition.
    Exceptions are printed and the loop continues (best-effort runner).
    """
    session = query.new_session()
    while not (self.node_handler.is_completed()
               and self.node_handler.empty()):
        try:
            req = self.node_handler.get_wait(1)
            if req is not None:
                self.count += 1
                query.execute_workload_item(session, req.workload)
            else:
                # Nothing available right now: yield to other greenlets.
                gevent.sleep(0)
        except Exception:
            # Cleanup: removed the unused `t = random.random()` draw, the
            # commented-out debug prints/sleeps, and the unused `as e`.
            import traceback
            traceback.print_exc()