def _push_rollpair(self, rp: RollPair, rs_header: ErRollSiteHeader, options: dict = None):
    if options is None:
        options = {}
    rs_key = rs_header.get_rs_key()
    if L.isEnabledFor(logging.DEBUG):
        L.debug(f"pushing rollpair: rs_key={rs_key}, rs_header={rs_header}, rp.count={rp.count()}")
    start_time = time.time()

    rs_header._total_partitions = rp.get_partitions()

    serdes = options.get('serdes', None)
    if serdes is not None:
        rs_header._options['serdes'] = serdes

    wrapee_cls = options.get('wrapee_cls', None)
    if wrapee_cls is not None:
        rs_header._options['wrapee_cls'] = wrapee_cls

    batches_per_stream = self.push_batches_per_stream
    body_bytes = self.batch_body_bytes
    endpoint = self.ctx.proxy_endpoint
    max_retry_cnt = self.push_max_retry
    long_retry_cnt = self.push_long_retry
    per_stream_timeout = self.push_per_stream_timeout

    def _push_partition(ertask):
        rs_header._partition_id = ertask._inputs[0]._id
        L.trace(f"pushing rollpair partition. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}")

        from eggroll.core.grpc.factory import GrpcChannelFactory
        from eggroll.core.proto import proxy_pb2_grpc
        grpc_channel_factory = GrpcChannelFactory()

        with create_adapter(ertask._inputs[0]) as db, db.iteritems() as rb:
            # NOTICE AGAIN: all modifications to rs_header are confined to bs_helper.
            # rs_header is shared by bs_helper and this scope, so any modification made
            # in bs_helper is visible here. Remember that Python passes object references
            # by value: the 'pointer' is copied, while the referenced contents remain modifiable.
            bs_helper = _BatchStreamHelper(rs_header)
            bin_batch_streams = bs_helper._generate_batch_streams(
                    pair_iter=rb,
                    batches_per_stream=batches_per_stream,
                    body_bytes=body_bytes)

            channel = grpc_channel_factory.create_channel(endpoint)
            stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
            for batch_stream in bin_batch_streams:
                batch_stream_data = list(batch_stream)
                cur_retry = 0
                exception = None
                while cur_retry < max_retry_cnt:
                    L.trace(f'pushing rollpair partition stream. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}')
                    try:
                        stub.push(bs_helper.generate_packet(batch_stream_data, cur_retry),
                                  timeout=per_stream_timeout)
                        exception = None
                        break
                    except Exception as e:
                        if cur_retry < max_retry_cnt - long_retry_cnt:
                            retry_interval = round(min(2 * cur_retry, 20) + random.random() * 10, 3)
                        else:
                            retry_interval = round(300 + random.random() * 10, 3)
                        L.warn(f"push rp partition error. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, max_retry_cnt={max_retry_cnt}, cur_retry={cur_retry}, retry_interval={retry_interval}",
                               exc_info=e)
                        time.sleep(retry_interval)
                        # refresh the channel if the proxy endpoint became unavailable
                        if isinstance(e, RpcError) and e.code().name == 'UNAVAILABLE':
                            channel = grpc_channel_factory.create_channel(endpoint, refresh=True)
                            stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
                        exception = e
                    finally:
                        cur_retry += 1
                if exception is not None:
                    L.exception(f"push partition failed. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}",
                                exc_info=exception)
                    raise exception
                L.trace(f'pushed rollpair partition stream. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, retry count={cur_retry - 1}')

        L.trace(f"pushed rollpair partition. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}")

    rp.with_stores(_push_partition, options={"__op": "push_partition"})
    if L.isEnabledFor(logging.DEBUG):
        L.debug(f"pushed rollpair: rs_key={rs_key}, rs_header={rs_header}, count={rp.count()}, elapsed={time.time() - start_time}")
    self.ctx.pushing_latch.count_down()
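
# A minimal standalone sketch (not part of eggroll's API) of the retry-interval policy that
# both push loops in this file apply: short, jittered waits for the first
# (max_retry_cnt - long_retry_cnt) attempts, then long ~300s waits for the remaining ones.
# The function name _retry_interval_sketch is hypothetical and exists only for illustration.
def _retry_interval_sketch(cur_retry: int, max_retry_cnt: int, long_retry_cnt: int) -> float:
    import random
    if cur_retry < max_retry_cnt - long_retry_cnt:
        # early retries: base grows with the attempt count, capped at 20s, plus 0-10s jitter
        return round(min(2 * cur_retry, 20) + random.random() * 10, 3)
    # final long retries: roughly five minutes plus 0-10s jitter
    return round(300 + random.random() * 10, 3)

# e.g. _retry_interval_sketch(0, 300, 3) gives ~0-10s, while
#      _retry_interval_sketch(299, 300, 3) gives ~300-310s.
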
def _push_bytes(self, obj, rs_header: ErRollSiteHeader, options: dict = None):
    if options is None:
        options = {}
    start_time = time.time()

    rs_key = rs_header.get_rs_key()
    int_size = 4

    if L.isEnabledFor(logging.DEBUG):
        L.debug(f"pushing object: rs_key={rs_key}, rs_header={rs_header}")

    def _generate_obj_bytes(py_obj, body_bytes):
        key_id = 0
        obj_bytes = pickle.dumps(py_obj)
        obj_bytes_len = len(obj_bytes)
        cur_pos = 0

        while cur_pos <= obj_bytes_len:
            yield key_id.to_bytes(int_size, "big"), obj_bytes[cur_pos:cur_pos + body_bytes]
            key_id += 1
            cur_pos += body_bytes

    rs_header._partition_id = 0
    rs_header._total_partitions = 1

    serdes = options.get('serdes', None)
    if serdes is not None:
        rs_header._options['serdes'] = serdes

    wrapee_cls = options.get('wrapee_cls', None)
    if wrapee_cls is not None:
        rs_header._options['wrapee_cls'] = wrapee_cls

    # NOTICE: all modifications to rs_header are confined to bs_helper.
    # rs_header is shared by bs_helper and this scope, so any modification made
    # in bs_helper is visible here. Remember that Python passes object references
    # by value: the 'pointer' is copied, while the referenced contents remain modifiable.
    bs_helper = _BatchStreamHelper(rs_header=rs_header)
    bin_batch_streams = bs_helper._generate_batch_streams(
            pair_iter=_generate_obj_bytes(obj, self.batch_body_bytes),
            batches_per_stream=self.push_batches_per_stream,
            body_bytes=self.batch_body_bytes)

    grpc_channel_factory = GrpcChannelFactory()
    channel = grpc_channel_factory.create_channel(self.ctx.proxy_endpoint)
    stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
    max_retry_cnt = self.push_max_retry
    long_retry_cnt = self.push_long_retry
    per_stream_timeout = self.push_per_stream_timeout

    # if stub.push.future were used here, the retry mechanism would still need to be solved
    for batch_stream in bin_batch_streams:
        cur_retry = 0
        batch_stream_data = list(batch_stream)
        exception = None
        while cur_retry < max_retry_cnt:
            L.trace(f'pushing object stream. rs_key={rs_key}, rs_header={rs_header}, cur_retry={cur_retry}')
            try:
                stub.push(bs_helper.generate_packet(batch_stream_data, cur_retry),
                          timeout=per_stream_timeout)
                exception = None
                break
            except Exception as e:
                if cur_retry <= max_retry_cnt - long_retry_cnt:
                    retry_interval = round(min(2 * cur_retry, 20) + random.random() * 10, 3)
                else:
                    retry_interval = round(300 + random.random() * 10, 3)
                L.warn(f"push object error. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, max_retry_cnt={max_retry_cnt}, cur_retry={cur_retry}, retry_interval={retry_interval}",
                       exc_info=e)
                time.sleep(retry_interval)
                # refresh the channel if the proxy endpoint became unavailable
                if isinstance(e, RpcError) and e.code().name == 'UNAVAILABLE':
                    channel = grpc_channel_factory.create_channel(self.ctx.proxy_endpoint, refresh=True)
                    stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
                exception = e
            finally:
                cur_retry += 1
        if exception is not None:
            L.exception(f"push object failed. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}",
                        exc_info=exception)
            raise exception
        L.trace(f'pushed object stream. rs_key={rs_key}, rs_header={rs_header}, cur_retry={cur_retry - 1}')

    L.debug(f"pushed object: rs_key={rs_key}, rs_header={rs_header}, is_none={obj is None}, elapsed={time.time() - start_time}")
    self.ctx.pushing_latch.count_down()
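
# A minimal standalone sketch (not part of eggroll's API) of the chunking layout used by
# _generate_obj_bytes above: the pickled object is split into body_bytes-sized slices, each
# keyed by a 4-byte big-endian sequence number. _reassemble_obj_sketch is a hypothetical
# inverse showing how a receiver could rebuild the object from such (key, chunk) pairs.
import pickle


def _chunk_obj_sketch(py_obj, body_bytes: int, int_size: int = 4):
    obj_bytes = pickle.dumps(py_obj)
    cur_pos, key_id = 0, 0
    # '<=' mirrors the generator above: even an empty payload yields at least one chunk
    while cur_pos <= len(obj_bytes):
        yield key_id.to_bytes(int_size, "big"), obj_bytes[cur_pos:cur_pos + body_bytes]
        key_id += 1
        cur_pos += body_bytes


def _reassemble_obj_sketch(pairs):
    # order chunks by their big-endian key, concatenate, then unpickle
    ordered = sorted(pairs, key=lambda kv: int.from_bytes(kv[0], "big"))
    return pickle.loads(b"".join(chunk for _, chunk in ordered))


# round-trip check:
#   _reassemble_obj_sketch(_chunk_obj_sketch({"a": 1}, body_bytes=8)) == {"a": 1}
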