Example #1
    def __exit__(self, exc_type, exc_value, exc_traceback):
        """ Defines the exit protocol from asyncio task.

        Args:
            exc_type (Type): The type of the exception.
            exc_value (RuntimeError): The value of the exception, typically RuntimeError. 
            exc_traceback (traceback): The traceback that can be printed for this exception, detailing where error actually happend.

        Returns:
            Neuron: present instance of Neuron.
        """
        self.stop()
        if exc_value:

            # Capture the current call stack, dropping the trailing lines that
            # belong to this handler itself.
            top_stack = StringIO()
            tb.print_stack(file=top_stack)
            top_lines = top_stack.getvalue().strip('\n').split('\n')[:-4]
            top_stack.close()

            # Splice the outer stack onto the exception traceback so the log
            # shows one continuous trace.
            full_stack = StringIO()
            full_stack.write('Traceback (most recent call last):\n')
            full_stack.write('\n'.join(top_lines))
            full_stack.write('\n')
            tb.print_tb(exc_traceback, file=full_stack)
            full_stack.write('{}: {}'.format(exc_type.__name__,
                                             str(exc_value)))
            sinfo = full_stack.getvalue()
            full_stack.close()
            # Log the combined stack
            logger.error('Exception: {}'.format(sinfo))

            if rollbar.is_enabled():
                rollbar.send_exception()

        return self
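Because __exit__ unconditionally returns self, which is truthy, any exception raised inside the with block is suppressed after being logged (and reported to rollbar when enabled). A minimal usage sketch, assuming a Neuron class that implements __enter__ alongside the __exit__ above (the constructor argument here is hypothetical):

    # Hypothetical usage; `config` and the Neuron constructor are assumptions.
    with Neuron(config) as neuron:   # __enter__ starts the underlying asyncio task
        neuron.run()                 # exceptions raised here land in __exit__
    # __exit__ calls self.stop(), logs the spliced traceback, and, because it
    # returns a truthy value, swallows the exception instead of re-raising it.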
Example #2
    def backward(ctx, grads: torch.FloatTensor,
                 code: torch.FloatTensor) -> Tuple[Optional[torch.Tensor], ...]:
        """ Internal autograd-friendly Backward RPC call to a remote neuron (calls the Backward method on an remote Axon terminal.)

            Args:
                ctx: (:obj:`torch.autograd.ctx`, `required`):
                    Autograd context, saves state information between forward and backward calls. i.e. inputs for gradient computation.
  
                grads (:obj:`List[torch.Tensor]` of shape :obj:`(shape)`, `required`):
                    Gradients of this function's outputs computed during the loss.backward() call.

                code (:obj:`bittensor.proto.ReturnCode` of shape :obj:`(1)`, `required`):
                    Return code output from the forward call.

            Returns:
                output (:obj:`Tuple[torch.FloatTensor, torch.LongTensor]`, `optional`):
                    Gradients with respect to this function's inputs, computed from the grads of the outputs.
        """
        # ---- Zeros response in the case of failure ----
        zeros = nill_response_for(ctx.inputs)

        # ---- Check if are passing gradients ----
        if not ctx.caller.config.receptor.pass_gradients:
            return (None, None, zeros, None)

        # ---- Check that forward query was a success ----
        if code.item() != bittensor.proto.ReturnCode.Success:
            return (None, None, zeros, None)

        # ---- Try to pass gradients ----
        else:
            try:

                # ---- Get forward call serialized inputs ----
                try:
                    serialized_inputs = ctx.serialized_inputs
                except AttributeError:
                    logger.trace(
                        'backward failed because forward previously failed.')
                    return (None, None, zeros, None)

                # ---- Serialization ----
                try:
                    # ---- Get serializer ----
                    serializer = serialization.get_serializer(
                        bittensor.proto.Serializer.MSGPACK)

                    # ---- Serialize grads to bittensor_pb2.Tensors ----
                    serialized_grads = serializer.serialize(
                        grads,
                        modality=bittensor.proto.Modality.TENSOR,
                        from_type=bittensor.proto.TensorType.TORCH)

                except Exception as e:
                    logger.trace(
                        'backward failed during serialization of gradients: {}'.format(e))
                    return (None, None, zeros, None)

                # ---- Build request for backward ----
                request = bittensor.proto.TensorMessage(
                    version=bittensor.__version__,
                    public_key=ctx.caller.wallet.hotkey.public_key,
                    nounce=ctx.caller.nounce,
                    signature=ctx.caller.signature,
                    tensors=[serialized_inputs, serialized_grads])

                # --- Send non blocking grad request ----
                # NOTE(const): we don't care about the response.
                try:
                    ctx.caller.stats.backward_qps.update(1)
                    ctx.caller.stats.backward_bytes_out.update(
                        sys.getsizeof(request))
                    ctx.caller.stub.Backward.future(
                        request, timeout=ctx.caller.config.receptor.timeout)
                    ctx.caller.stats.backward_bytes_in.update(
                        0.0)  # responses are dropped.

                except Exception:
                    logger.trace(
                        'backward failed during the backward call; the response is ignored.')
                    return (None, None, zeros, None)

                # ---- Always return zeros ----
                # NOTE(const): We can return non zeros but a remote host could mess with your training
                # without you knowing about it. i.e. by passing you malicious gradients.
                return (None, None, zeros, None)

            except Exception:

                # ---- Catch all exceptions in Backward ----
                rollbar.send_exception()
                return (None, None, zeros, None)
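The four-slot return value mirrors torch.autograd.Function's contract: backward must return one gradient per forward argument, with None for arguments that are not differentiable (here, everything except the inputs tensor, which always receives zeros so a remote host cannot inject malicious gradients). A minimal, self-contained sketch of that pattern; the Passthrough class and its signature are illustrative assumptions, not bittensor's API:

    import torch

    class Passthrough(torch.autograd.Function):
        """Toy Function mirroring the one-slot-per-argument backward contract."""

        @staticmethod
        def forward(ctx, caller, inputs):
            ctx.save_for_backward(inputs)
            return inputs.clone()

        @staticmethod
        def backward(ctx, grads):
            inputs, = ctx.saved_tensors
            # One return slot per forward argument: None for the
            # non-differentiable `caller`, a zero tensor for `inputs`.
            return (None, torch.zeros_like(inputs))

    x = torch.ones(2, 3, requires_grad=True)
    y = Passthrough.apply(object(), x)
    y.sum().backward()
    print(x.grad)  # all zeros, mirroring the "always return zeros" policy above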