Example #1
    def _apply_model_parallel(self, tensor, encoder_output, encoder_mask, incr_state):
        """
        Pipeline application of model parallelism.
        """
        chunks = PipelineHelper.split(
            (tensor, encoder_output, encoder_mask, incr_state)
        )
        work_items = PipelineHelper.schedule_work_items(self.layers, chunks)

        new_incr_state = {i: [] for i, _ in enumerate(self.layers)}

        for chunk_idx, layer_nos, next_device in work_items:
            s_tensor, s_enc_out, s_enc_mask, s_incr_state = chunks[chunk_idx]
            for layer_no in layer_nos:
                s_tensor, nis = self.layers[layer_no](
                    x=s_tensor,
                    encoder_output=s_enc_out,
                    encoder_mask=s_enc_mask,
                    incr_state=s_incr_state.get(layer_no),
                )
                new_incr_state[layer_no].append(nis)
            # don't move incr state; it's always on the correct device
            s_tensor, s_enc_out, s_enc_mask = PipelineHelper.chunk_to(
                (s_tensor, s_enc_out, s_enc_mask), next_device
            )
            chunks[chunk_idx] = (s_tensor, s_enc_out, s_enc_mask, s_incr_state)

        tensor_out = PipelineHelper.join([c[0] for c in chunks])
        new_incr_state = {
            layer_no: PipelineHelper.join(pieces)
            for layer_no, pieces in new_incr_state.items()
        }

        return tensor_out, new_incr_state
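All of these methods consume the same stream of (chunk_idx, layer_nos, next_device) work items from PipelineHelper.schedule_work_items. The sketch below is a self-contained mock, not ParlAI's actual scheduler, that shows one plausible way such a stream can be produced: consecutive layers are grouped by the device they sit on, and chunks are staggered so that, combined with asynchronous CUDA execution, different chunks can occupy different GPUs at the same time. The grouping and exact ordering here are illustrative assumptions.

# Conceptual mock only; not ParlAI's PipelineHelper.schedule_work_items.
def mock_schedule_work_items(layer_devices, num_chunks):
    # Group consecutive layers that live on the same device.
    groups = []
    for layer_no, device in enumerate(layer_devices):
        if groups and groups[-1][0] == device:
            groups[-1][1].append(layer_no)
        else:
            groups.append((device, [layer_no]))
    # Stagger chunks: in round r, chunk c is handled by layer group r - c, so
    # chunk 0 can run on the second device while chunk 1 runs on the first.
    for r in range(len(groups) + num_chunks - 1):
        for chunk_idx in range(num_chunks):
            g = r - chunk_idx
            if 0 <= g < len(groups):
                device, layer_nos = groups[g]
                next_device = groups[min(g + 1, len(groups) - 1)][0]
                yield chunk_idx, layer_nos, next_device

for item in mock_schedule_work_items(['cuda:0', 'cuda:0', 'cuda:1'], num_chunks=2):
    print(item)  # (0, [0, 1], 'cuda:1'), then (0, [2], 'cuda:1') and (1, [0, 1], 'cuda:1'), ...
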
Example #2
    def _apply_model_parallel(self, tensor, encoder_output, encoder_mask,
                              incr_state):
        """
        Pipeline application of model parallelism.
        """
        chunks = PipelineHelper.split(
            (tensor, encoder_output, encoder_mask, incr_state))
        work_items = PipelineHelper.schedule_work_items(self.layers, chunks)

        new_incr_state = [{} for _ in chunks]

        for chunk_idx, layer_nos, next_device in work_items:
            s_tensor, s_enc_out, s_enc_mask, s_incr_state = chunks[chunk_idx]
            for layer_no in layer_nos:
                layer = self.layers[layer_no]
                s_tensor, new_incr_state[chunk_idx][layer_no] = layer(
                    x=s_tensor,
                    encoder_output=s_enc_out,
                    encoder_mask=s_enc_mask,
                    incr_state=s_incr_state.get(layer_no),
                )
            chunks[chunk_idx] = PipelineHelper.chunk_to(
                (s_tensor, s_enc_out, s_enc_mask, s_incr_state), next_device)

        tensor_out = PipelineHelper.join([c[0] for c in chunks])
        new_incr_state = PipelineHelper.join(new_incr_state)

        return tensor_out, new_incr_state
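Both decoder variants return the joined new_incr_state alongside the output tensor. The fragment below is a purely illustrative sketch of why: during generation the decoder is called one token at a time, and the state returned at step t is passed back in as incr_state at step t + 1 so each layer can reuse its cached keys and values. fake_decoder is a hypothetical stand-in that only mimics the calling convention; it is not ParlAI's decoder.

def fake_decoder(latest_token, encoder_states, incr_state=None):
    # Stand-in that only mimics the (output, new_incr_state) return shape.
    incr_state = dict(incr_state or {})
    incr_state[len(incr_state)] = latest_token  # pretend each step caches something
    return f"output after '{latest_token}'", incr_state

incr_state = None
for token in ["<s>", "hello", "world"]:
    output, incr_state = fake_decoder(token, encoder_states=None, incr_state=incr_state)
print(len(incr_state))  # 3: the cache grew by one entry per decoding step
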
Example #3
    def _apply_model_parallel_with_extra(
        self,
        tensor,
        encoder_output,
        encoder_mask,
        incr_state,
        extra_output: torch.Tensor = None,
        extra_mask: torch.Tensor = None,
    ) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
        """
        Copy of TransformerDecoder._apply_model_parallel that additionally threads
        through the extra output/extra mask.
        """
        chunks = PipelineHelper.split(
            (tensor, encoder_output, encoder_mask, incr_state, extra_output, extra_mask)
        )
        work_items = PipelineHelper.schedule_work_items(self.layers, chunks)

        new_incr_state = {i: [] for i, _ in enumerate(self.layers)}

        for chunk_idx, layer_nos, next_device in work_items:
            s_tensor, s_enc_out, s_enc_mask, s_incr_state, s_extra_out, s_extra_mask = chunks[
                chunk_idx
            ]
            for layer_no in layer_nos:
                s_tensor, nis = self.layers[layer_no](
                    x=s_tensor,
                    encoder_output=s_enc_out,
                    encoder_mask=s_enc_mask,
                    incr_state=s_incr_state.get(layer_no),
                    extra_output=s_extra_out,
                    extra_mask=s_extra_mask,
                )
                new_incr_state[layer_no].append(nis)
            # don't move incr state; it's always on the correct device
            s_tensor, s_enc_out, s_enc_mask, s_extra_out, s_extra_mask = PipelineHelper.chunk_to(
                (s_tensor, s_enc_out, s_enc_mask, s_extra_out, s_extra_mask),
                next_device,
            )
            chunks[chunk_idx] = (
                s_tensor,
                s_enc_out,
                s_enc_mask,
                s_incr_state,
                s_extra_out,
                s_extra_mask,
            )

        tensor_out = PipelineHelper.join([c[0] for c in chunks])
        new_incr_state = {
            layer_no: PipelineHelper.join(pieces)
            for layer_no, pieces in new_incr_state.items()
        }

        return tensor_out, new_incr_state  # type: ignore
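PipelineHelper.split and PipelineHelper.join are used in all of these methods as inverses: split chunks every tensor in a possibly nested structure (tuples, dicts such as incr_state, plain tensors) along the batch dimension, and join concatenates the per-chunk results back together. The snippet below is a toy re-implementation of that round trip, written only to make the data flow concrete; it is not the ParlAI helper and skips details such as guessing a split size from free GPU memory or handling None entries.

import torch

def toy_split(item, split_size, dim=0):
    # Chunk every tensor in a nested structure along `dim`.
    if torch.is_tensor(item):
        return list(torch.split(item, split_size, dim=dim))
    if isinstance(item, tuple):
        return [tuple(parts) for parts in zip(*(toy_split(x, split_size, dim) for x in item))]
    if isinstance(item, dict):
        keys = list(item)
        pieces = [toy_split(item[k], split_size, dim) for k in keys]
        return [dict(zip(keys, vals)) for vals in zip(*pieces)]
    raise TypeError(f"cannot split {type(item)}")

def toy_join(chunks, dim=0):
    # Inverse of toy_split: concatenate per-chunk results back together.
    first = chunks[0]
    if torch.is_tensor(first):
        return torch.cat(chunks, dim=dim)
    if isinstance(first, tuple):
        return tuple(toy_join(list(parts), dim) for parts in zip(*chunks))
    if isinstance(first, dict):
        return {k: toy_join([c[k] for c in chunks], dim) for k in first}
    raise TypeError(f"cannot join {type(first)}")

tensor = torch.randn(8, 4)
incr_state = {0: {'self_attn': torch.randn(8, 2)}, 1: {'self_attn': torch.randn(8, 2)}}
chunks = toy_split((tensor, incr_state), split_size=4)  # two chunks of batch size 4
merged_tensor, merged_state = toy_join(chunks)
assert torch.equal(merged_tensor, tensor)
assert torch.equal(merged_state[0]['self_attn'], incr_state[0]['self_attn'])
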
Example #4
    def _apply_model_parallel(self, tensor, mask):
        """
        Pipeline application of model parallelism.
        """
        chunks = PipelineHelper.split((tensor, mask))
        work_items = PipelineHelper.schedule_work_items(self.layers, chunks)

        for chunk_idx, layer_nos, next_device in work_items:
            s_tensor, s_mask = chunks[chunk_idx]
            for layer_no in layer_nos:
                s_tensor = self.layers[layer_no](s_tensor, s_mask)
            chunks[chunk_idx] = PipelineHelper.chunk_to((s_tensor, s_mask), next_device)

        tensor_out, mask_out = PipelineHelper.join(chunks)
        # the mask is not modified by the layers, so only the transformed tensor is returned
        return tensor_out
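For context, _apply_model_parallel is just the multi-GPU counterpart of the ordinary layer loop in forward(). The class below is a hypothetical stand-in (not ParlAI's TransformerEncoder) showing how such a forward typically branches on a model_parallel flag; the single-device branch is the loop the pipelined method has to reproduce across devices.

import torch
import torch.nn as nn

class ToyEncoder(nn.Module):
    """Hypothetical encoder used only to illustrate the dispatch pattern."""

    def __init__(self, dim=16, n_layers=2, model_parallel=False):
        super().__init__()
        self.model_parallel = model_parallel
        self.layers = nn.ModuleList(nn.Linear(dim, dim) for _ in range(n_layers))

    def _apply_model_parallel(self, tensor, mask):
        # Multi-GPU pipelined path, as in Example #4 above.
        raise NotImplementedError

    def forward(self, tensor, mask):
        if self.model_parallel:
            # Split into chunks, walk the device-grouped schedule, re-join.
            return self._apply_model_parallel(tensor, mask)
        # Single-device path: the plain loop the pipelined method mirrors.
        for layer in self.layers:
            # stand-in computation; a real transformer layer would use the mask in attention
            tensor = layer(tensor) * mask.unsqueeze(-1).to(tensor.dtype)
        return tensor

out = ToyEncoder()(torch.randn(2, 5, 16), torch.ones(2, 5, dtype=torch.bool))
print(out.shape)  # torch.Size([2, 5, 16])
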
Example #5
    def _apply_model_parallel(self, tensor, mask,
                              **kwargs) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Override to return attention weights.
        """
        chunks = PipelineHelper.split((tensor, mask))
        work_items = PipelineHelper.schedule_work_items(self.layers, chunks)

        for chunk_idx, layer_nos, next_device in work_items:
            s_weights = None
            # a chunk starts out as (tensor, mask) from split(); once it has been
            # through chunk_to() below it also carries the latest attention weights
            try:
                s_tensor, s_mask = chunks[chunk_idx]
            except ValueError:
                s_tensor, s_mask, s_weights = chunks[chunk_idx]
            for layer_no in layer_nos:
                s_tensor, s_weights = self.layers[layer_no](s_tensor, s_mask,
                                                            **kwargs)
            chunks[chunk_idx] = PipelineHelper.chunk_to(
                (s_tensor, s_mask, s_weights), next_device)
        joined = PipelineHelper.join(chunks)
        # each re-joined chunk holds (tensor, mask, weights); the mask is unchanged
        # by the layers and is discarded here
        tensor_out, out_mask, weights = joined
        return tensor_out, weights