Example #1
    def count_batch(self, rs_header: ErRollSiteHeader, batch_pairs):
        L.trace(f'count batch. rs_key={rs_header.get_rs_key()}, rs_header={rs_header}, batch_pairs={batch_pairs}')
        batch_seq_id = rs_header._batch_seq
        stream_seq_id = rs_header._stream_seq
        if self._rs_header is None:
            self._rs_header = rs_header
            self._rs_key = rs_header.get_rs_key()
            L.debug(f"header arrived. rs_key={rs_header.get_rs_key()}, rs_header={rs_header}")
            self._header_arrive_event.set()
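        # bookkeeping: per-batch pair counts, per-stream running totals, and the
        # latest batch seen per stream (assumes _stream_seq_to_pair_counter is a defaultdict(int))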
        self._batch_seq_to_pair_counter[batch_seq_id] = batch_pairs
        self._stream_seq_to_pair_counter[stream_seq_id] += batch_pairs
        self._stream_seq_to_batch_seq[stream_seq_id] = batch_seq_id
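A minimal, self-contained sketch of the same bookkeeping pattern, assuming a threading.Event signals header arrival and a defaultdict backs the per-stream totals; the class and attribute names are illustrative, not eggroll API:

import threading
from collections import defaultdict

class BatchCounter:
    """Illustrative stand-in for the roll_site receiver's counting logic."""

    def __init__(self):
        self._header = None
        self._header_arrive_event = threading.Event()
        self._batch_seq_to_pair_counter = {}
        self._stream_seq_to_pair_counter = defaultdict(int)
        self._stream_seq_to_batch_seq = {}

    def count_batch(self, header, batch_seq, stream_seq, batch_pairs):
        if self._header is None:
            self._header = header
            self._header_arrive_event.set()  # wake any thread waiting for the header
        self._batch_seq_to_pair_counter[batch_seq] = batch_pairs
        self._stream_seq_to_pair_counter[stream_seq] += batch_pairs
        self._stream_seq_to_batch_seq[stream_seq] = batch_seq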
Example #2
    def _pull_one(self, rs_header: ErRollSiteHeader, options: dict = None):
        if options is None:
            options = {}

        start_time = time.time()
        rs_key = rp_name = rs_header.get_rs_key()
        rp_namespace = self.roll_site_session_id
        transfer_tag_prefix = "putBatch-" + rs_header.get_rs_key() + "-"
        last_total_batches = None
        last_cur_pairs = -1
        pull_attempts = 0
        data_type = None
        L.debug(f'pulling rs_key={rs_key}')
        try:
            # make sure rollpair already created
            pull_header_interval = self.pull_header_interval  # skips pickling self
            pull_header_timeout = self.pull_header_timeout  # skips pickling self
            pull_interval = self.pull_interval  # skips pickling self

            def get_partition_status(task):
                put_batch_task = PutBatchTask(
                    transfer_tag_prefix + str(task._inputs[0]._id), None)
                return put_batch_task.get_status(pull_interval)

            def get_status(roll_site):
                pull_status = {}
                total_pairs = 0
                total_batches = 0
                all_finished = True

                final_options = options.copy()
                final_options['create_if_missing'] = False
                store = roll_site.ctx.rp_ctx.load(name=rp_name,
                                                  namespace=rp_namespace,
                                                  options=final_options)

                if store is None:
                    raise ValueError(
                        f'illegal state for rp_name={rp_name}, rp_namespace={rp_namespace}'
                    )

                all_status = store.with_stores(
                    get_partition_status,
                    options={"__op": "get_partition_status"})

                for part_id, part_status in all_status:
                    if not part_status.is_finished:
                        all_finished = False
                    pull_status[part_id] = part_status
                    total_batches += part_status.total_batches
                    total_pairs += part_status.total_pairs

                return pull_status, all_finished, total_batches, total_pairs

            def clear_status(task):
                return PutBatchTask(transfer_tag_prefix +
                                    str(task._inputs[0]._id)).clear_status()

            wait_time = 0
            header_response = None
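            # poll for the stream header until it arrives or pull_header_timeout elapses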
            while wait_time < pull_header_timeout and \
                    (header_response is None or not isinstance(header_response[0][1], ErRollSiteHeader)):
                final_options = options.copy()
                final_options['create_if_missing'] = True
                final_options['total_partitions'] = 1
                header_response = self.ctx.rp_ctx.load(name=STATUS_TABLE_NAME,
                                                       namespace=rp_namespace,
                                                       options=final_options) \
                    .with_stores(
                        lambda x: PutBatchTask(transfer_tag_prefix + "0").get_header(pull_header_interval),
                        options={"__op": "pull_header"})
                wait_time += pull_header_interval

                L.debug(
                    f"roll site get header_response: rs_key={rs_key}, rs_header={rs_header}, wait_time={wait_time}"
                )

            if header_response is None or not isinstance(
                    header_response[0][1], ErRollSiteHeader):
                raise IOError(
                    f"roll site pull header failed: rs_key={rs_key}, rs_header={rs_header}, timeout={self.pull_header_timeout}"
                )
            else:
                header: ErRollSiteHeader = header_response[0][1]
                # TODO:0: a bytes push has only one partition, so it is already finished and get_status is unnecessary
                data_type = header._data_type
                L.debug(
                    f"roll site pull header successful: rs_key={rs_key}, rs_header={header}"
                )

            pull_status = {}
            for cur_retry in range(self.pull_max_retry):
                pull_attempts = cur_retry
                pull_status, all_finished, total_batches, cur_pairs = get_status(
                    self)

                if not all_finished:
                    L.debug(
                        f'getting status NOT finished for rs_key={rs_key}, '
                        f'rs_header={rs_header}, '
                        f'cur_status={pull_status}, '
                        f'attempts={pull_attempts}, '
                        f'cur_pairs={cur_pairs}, '
                        f'last_cur_pairs={last_cur_pairs}, '
                        f'total_batches={total_batches}, '
                        f'last_total_batches={last_total_batches}, '
                        f'elapsed={time.time() - start_time}')
                    if last_cur_pairs == cur_pairs and cur_pairs > 0:
                        raise IOError(
                            f"roll site pull waiting failed because there is no updated progress: rs_key={rs_key}, "
                            f"rs_header={rs_header}, pull_status={pull_status}, last_cur_pairs={last_cur_pairs}, cur_pairs={cur_pairs}"
                        )
                else:
                    L.debug(
                        f"getting status DO finished for rs_key={rs_key}, rs_header={rs_header}, pull_status={pull_status}, cur_pairs={cur_pairs}, total_batches={total_batches}"
                    )
                    rp = self.ctx.rp_ctx.load(name=rp_name,
                                              namespace=rp_namespace)

                    clear_future = self._receive_executor_pool.submit(
                        rp.with_stores,
                        clear_status,
                        options={"__op": "clear_status"})
                    if data_type == "object":
                        # reassemble the pickled object: sort chunks by their
                        # 4-byte big-endian key, then concatenate and unpickle
                        result = pickle.loads(b''.join(
                            v for _, v in sorted(rp.get_all(),
                                                 key=lambda kv: int.from_bytes(kv[0], "big"))))
                        rp.destroy()
                        L.debug(
                            f"pulled object: rs_key={rs_key}, rs_header={rs_header}, is_none={result is None}, "
                            f"elapsed={time.time() - start_time}")
                    else:
                        result = rp

                        if L.isEnabledFor(logging.DEBUG):
                            L.debug(
                                f"pulled roll_pair: rs_key={rs_key}, rs_header={rs_header}, rp.count={rp.count()}, "
                                f"elapsed={time.time() - start_time}")

                    clear_future.result()
                    return result
                last_total_batches = total_batches
                last_cur_pairs = cur_pairs
            raise IOError(
                f"roll site pull failed. max try exceeded: {self.pull_max_retry}, rs_key={rs_key}, "
                f"rs_header={rs_header}, pull_status={pull_status}")
        except Exception:
            L.exception(
                f"fatal error when pulling rs_key={rs_key}, rs_header={rs_header}, attempts={pull_attempts}"
            )
            raise
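The method above combines two waiting patterns: a bounded poll for the header, then a bounded retry loop that aborts when pair counts stop growing. A generic sketch of both, with poll_fn and status_fn as hypothetical stand-ins for the eggroll calls:

def wait_for(poll_fn, interval, timeout):
    """Poll until poll_fn returns a value or `timeout` seconds have elapsed."""
    waited = 0
    while waited < timeout:
        result = poll_fn(interval)  # poll_fn itself blocks for up to `interval`
        if result is not None:
            return result
        waited += interval
    return None  # caller treats None as a timeout

def pull_until_finished(status_fn, max_retry):
    """Retry until status_fn reports completion, failing fast on stalled progress."""
    # status_fn is assumed to block for one polling interval, as get_status does above
    last_pairs = -1
    for attempt in range(max_retry):
        finished, cur_pairs = status_fn()
        if finished:
            return cur_pairs
        if cur_pairs == last_pairs and cur_pairs > 0:
            raise IOError(f"no progress since attempt {attempt - 1}")
        last_pairs = cur_pairs
    raise IOError(f"max retries exceeded: {max_retry}")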
Example #3
    def _push_rollpair(self,
                       rp: RollPair,
                       rs_header: ErRollSiteHeader,
                       options: dict = None):
        if options is None:
            options = {}
        rs_key = rs_header.get_rs_key()
        if L.isEnabledFor(logging.DEBUG):
            L.debug(
                f"pushing rollpair: rs_key={rs_key}, rs_header={rs_header}, rp.count={rp.count()}"
            )
        start_time = time.time()

        rs_header._total_partitions = rp.get_partitions()
        serdes = options.get('serdes', None)
        if serdes is not None:
            rs_header._options['serdes'] = serdes

        wrapee_cls = options.get('wrapee_cls', None)
        if wrapee_cls is not None:
            rs_header._options['wrapee_cls'] = wrapee_cls

        batches_per_stream = self.push_batches_per_stream
        body_bytes = self.batch_body_bytes
        endpoint = self.ctx.proxy_endpoint
        max_retry_cnt = self.push_max_retry
        long_retry_cnt = self.push_long_retry
        per_stream_timeout = self.push_per_stream_timeout
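        # bind these to locals so the function shipped via with_stores below
        # captures plain values instead of self (same 'skips pickling self' idea as in Example #2)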

        def _push_partition(ertask):
            rs_header._partition_id = ertask._inputs[0]._id
            L.trace(
                f"pushing rollpair partition. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}"
            )

            from eggroll.core.grpc.factory import GrpcChannelFactory
            from eggroll.core.proto import proxy_pb2_grpc
            grpc_channel_factory = GrpcChannelFactory()

            with create_adapter(ertask._inputs[0]) as db, db.iteritems() as rb:
                # NOTICE AGAIN: all modifications to rs_header must stay inside bs_helper.
                # rs_header is shared between bs_helper and this scope, so any modification
                # made in bs_helper is visible here. Remember that Python passes object
                # references by value: the 'pointer' is copied, but the object it points
                # to remains mutable.
                bs_helper = _BatchStreamHelper(rs_header)
                bin_batch_streams = bs_helper._generate_batch_streams(
                    pair_iter=rb,
                    batches_per_stream=batches_per_stream,
                    body_bytes=body_bytes)

                channel = grpc_channel_factory.create_channel(endpoint)
                stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
                for batch_stream in bin_batch_streams:
                    batch_stream_data = list(batch_stream)
                    cur_retry = 0
                    exception = None
                    while cur_retry < max_retry_cnt:
                        L.trace(
                            f'pushing rollpair partition stream. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}'
                        )
                        try:
                            stub.push(bs_helper.generate_packet(
                                batch_stream_data, cur_retry),
                                      timeout=per_stream_timeout)
                            exception = None
                            break
                        except Exception as e:
                            if cur_retry < max_retry_cnt - long_retry_cnt:
                                retry_interval = round(
                                    min(2 * cur_retry, 20) +
                                    random.random() * 10, 3)
                            else:
                                retry_interval = round(
                                    300 + random.random() * 10, 3)
                            L.warn(
                                f"push rp partition error. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, max_retry_cnt={max_retry_cnt}, cur_retry={cur_retry}, retry_interval={retry_interval}",
                                exc_info=e)
                            time.sleep(retry_interval)
                            if isinstance(e, RpcError) and e.code().name == 'UNAVAILABLE':
                                channel = grpc_channel_factory.create_channel(
                                    endpoint, refresh=True)
                                stub = proxy_pb2_grpc.DataTransferServiceStub(
                                    channel)

                            exception = e
                        finally:
                            cur_retry += 1
                    if exception is not None:
                        L.exception(
                            f"push partition failed. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}",
                            exc_info=exception)
                        raise exception
                    L.trace(
                        f'pushed rollpair partition stream. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, retry count={cur_retry - 1}'
                    )

            L.trace(
                f"pushed rollpair partition. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}"
            )

        rp.with_stores(_push_partition, options={"__op": "push_partition"})
        if L.isEnabledFor(logging.DEBUG):
            L.debug(
                f"pushed rollpair: rs_key={rs_key}, rs_header={rs_header}, count={rp.count()}, elapsed={time.time() - start_time}"
            )
        self.ctx.pushing_latch.count_down()
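Both push paths share the same retry policy: a short jittered backoff (at most ~30 s) for early attempts, then roughly five-minute waits for the final long_retry_cnt attempts, rebuilding the gRPC channel when the error is UNAVAILABLE. A sketch of that policy in isolation, with send_fn and rebuild_fn as hypothetical stand-ins:

import random
import time

def push_with_retry(send_fn, rebuild_fn, max_retry_cnt, long_retry_cnt):
    """Retry send_fn with jittered backoff; assumes max_retry_cnt >= 1."""
    exception = None
    for cur_retry in range(max_retry_cnt):
        try:
            send_fn()
            return cur_retry  # number of failed attempts before success
        except Exception as e:
            if cur_retry < max_retry_cnt - long_retry_cnt:
                # short backoff: 0-20 s ramp plus up to 10 s of jitter
                retry_interval = round(min(2 * cur_retry, 20) + random.random() * 10, 3)
            else:
                # long backoff for the last long_retry_cnt attempts
                retry_interval = round(300 + random.random() * 10, 3)
            time.sleep(retry_interval)
            rebuild_fn(e)  # e.g. refresh the channel if e is an UNAVAILABLE RpcError
            exception = e
    raise exception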
Example #4
    def _push_bytes(self,
                    obj,
                    rs_header: ErRollSiteHeader,
                    options: dict = None):
        if options is None:
            options = {}
        start_time = time.time()

        rs_key = rs_header.get_rs_key()
        int_size = 4

        if L.isEnabledFor(logging.DEBUG):
            L.debug(f"pushing object: rs_key={rs_key}, rs_header={rs_header}")

        def _generate_obj_bytes(py_obj, body_bytes):
            key_id = 0
            obj_bytes = pickle.dumps(py_obj)
            obj_bytes_len = len(obj_bytes)
            cur_pos = 0

            while cur_pos <= obj_bytes_len:  # '<=' can yield one trailing empty chunk; harmless when reassembled
                yield key_id.to_bytes(
                    int_size, "big"), obj_bytes[cur_pos:cur_pos + body_bytes]
                key_id += 1
                cur_pos += body_bytes

        # an object push travels as a single-partition stream
        rs_header._partition_id = 0
        rs_header._total_partitions = 1

        serdes = options.get('serdes', None)
        if serdes is not None:
            rs_header._options['serdes'] = serdes

        wrapee_cls = options.get('wrapee_cls', None)
        if wrapee_cls is not None:
            rs_header._options['wrapee_cls'] = wrapee_cls
        # NOTICE: all modifications to rs_header must stay inside bs_helper.
        # rs_header is shared between bs_helper and this scope, so any modification
        # made in bs_helper is visible here. Remember that Python passes object
        # references by value: the 'pointer' is copied, but the object it points
        # to remains mutable.
        bs_helper = _BatchStreamHelper(rs_header=rs_header)
        bin_batch_streams = bs_helper._generate_batch_streams(
            pair_iter=_generate_obj_bytes(obj, self.batch_body_bytes),
            batches_per_stream=self.push_batches_per_stream,
            body_bytes=self.batch_body_bytes)

        grpc_channel_factory = GrpcChannelFactory()
        channel = grpc_channel_factory.create_channel(self.ctx.proxy_endpoint)
        stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
        max_retry_cnt = self.push_max_retry
        long_retry_cnt = self.push_long_retry
        per_stream_timeout = self.push_per_stream_timeout

        # if use stub.push.future here, retry mechanism is a problem to solve
        for batch_stream in bin_batch_streams:
            cur_retry = 0

            batch_stream_data = list(batch_stream)
            exception = None
            while cur_retry < max_retry_cnt:
                L.trace(
                    f'pushing object stream. rs_key={rs_key}, rs_header={rs_header}, cur_retry={cur_retry}'
                )
                try:
                    stub.push(bs_helper.generate_packet(
                        batch_stream_data, cur_retry),
                              timeout=per_stream_timeout)
                    exception = None
                    break
                except Exception as e:
                    if cur_retry < max_retry_cnt - long_retry_cnt:
                        retry_interval = round(
                            min(2 * cur_retry, 20) + random.random() * 10, 3)
                    else:
                        retry_interval = round(300 + random.random() * 10, 3)
                    L.warn(
                        f"push object error. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, max_retry_cnt={max_retry_cnt}, cur_retry={cur_retry}, retry_interval={retry_interval}",
                        exc_info=e)
                    time.sleep(retry_interval)
                    if isinstance(e, RpcError) and e.code().name == 'UNAVAILABLE':
                        channel = grpc_channel_factory.create_channel(
                            self.ctx.proxy_endpoint, refresh=True)
                        stub = proxy_pb2_grpc.DataTransferServiceStub(channel)
                    exception = e
                finally:
                    cur_retry += 1
            if exception is not None:
                L.exception(
                    f"push object failed. rs_key={rs_key}, partition_id={rs_header._partition_id}, rs_header={rs_header}, cur_retry={cur_retry}",
                    exc_info=exception)
                raise exception
            L.trace(
                f'pushed object stream. rs_key={rs_key}, rs_header={rs_header}, cur_retry={cur_retry - 1}'
            )

        L.debug(
            f"pushed object: rs_key={rs_key}, rs_header={rs_header}, is_none={obj is None}, elapsed={time.time() - start_time}"
        )
        self.ctx.pushing_latch.count_down()
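For reference, a standalone round trip of the chunking scheme: _generate_obj_bytes splits the pickled object into (4-byte big-endian key, chunk) pairs, and the pull side (Example #2) reassembles by sorting on the key. The function names below are illustrative only:

import pickle

INT_SIZE = 4  # width of the big-endian chunk key, matching int_size above

def chunk_obj(py_obj, body_bytes):
    """Split a pickled object into (key, chunk) pairs, mirroring _generate_obj_bytes."""
    obj_bytes = pickle.dumps(py_obj)
    key_id, cur_pos = 0, 0
    while cur_pos <= len(obj_bytes):  # '<=' mirrors the original loop
        yield key_id.to_bytes(INT_SIZE, "big"), obj_bytes[cur_pos:cur_pos + body_bytes]
        key_id += 1
        cur_pos += body_bytes

def reassemble(pairs):
    """Sort chunks by key, concatenate, and unpickle, as the pull side does."""
    ordered = sorted(pairs, key=lambda kv: int.from_bytes(kv[0], "big"))
    return pickle.loads(b"".join(chunk for _, chunk in ordered))

assert reassemble(chunk_obj({"a": 1}, body_bytes=8)) == {"a": 1}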