Example #1
0
    def __init__(self, params: ExpertParams, creator: TensorCreator):
        """Set up per-flock buffers and build the child flocks.

        Allocates a NaN-filled context output tensor and a row-index tensor
        (one row per flock member, broadcast over cluster centers), then
        delegates flock construction to ``create_flocks``.

        :param params: expert configuration (flock size, cluster centers,
            temporal settings)
        :param creator: factory used to allocate tensors on the target device
        """
        super().__init__(creator.device)

        self.params = params
        self.n_cluster_centers = params.n_cluster_centers
        self.seq_lookbehind = params.temporal.seq_lookbehind
        self.n_providers = self.params.temporal.n_providers

        n_flocks = params.flock_size
        dtype = get_float(self._device)

        # Context layout per flock member:
        #   <SP_output>, <Rewards>, <Punishments>
        #   <Pred_clusters for next step>, <NaNs>, <NaNs>
        # with optional NaN padding depending on the context size in params.
        self.output_context = creator.full(
            (n_flocks, 2, NUMBER_OF_CONTEXT_TYPES, self.n_cluster_centers),
            fill_value=float("nan"),
            device=self._device,
            dtype=dtype)

        # Flock-member index per row, repeated across all cluster centers.
        self.index_tensor = creator.arange(
            start=0, end=n_flocks,
            device=self._device).view(-1, 1).expand(
                n_flocks, self.n_cluster_centers)

        self.create_flocks(params, creator)
Example #2
0
    def __init__(self, creator: TensorCreator, dim, input_shape, split_sizes,
                 dtype):
        """Pre-allocate outputs and gather-indices for splitting a tensor.

        The input of shape ``input_shape`` is split along dimension ``dim``
        into consecutive chunks of the given ``split_sizes``; one zeroed
        output tensor and one index range is prepared per chunk.

        :param creator: factory used to allocate tensors on the target device
        :param dim: dimension of the input along which to split
        :param input_shape: full shape of the input tensor
        :param split_sizes: size of each output chunk along ``dim``
        :param dtype: dtype of the pre-allocated output tensors
        :raises InvalidParameterException: if any split size is -1, or the
            combined split sizes exceed the input's extent along ``dim``
        """
        super().__init__(creator.device)

        self._input_shape = input_shape
        self._split_sizes = split_sizes
        self._dim = dim
        self.output_tensors = []
        self._indices = []

        if -1 in split_sizes:
            raise InvalidParameterException(
                "Automatic split size not supported")

        input_dim_size = input_shape[dim]

        outputs_size = sum(split_sizes)
        if outputs_size > input_dim_size:
            # Fixed message parenthesization: the closing paren previously
            # wrapped the whole clause instead of just the output size.
            message = f"The combined output size ({outputs_size}) is larger than dimension {dim}: {input_dim_size}"
            raise InvalidParameterException(message)

        split_start = 0
        for split in split_sizes:
            # Each output keeps the input's shape except along `dim`,
            # where it takes this chunk's size.
            output_dims = list(input_shape)
            output_dims[dim] = split
            output_tensor = creator.zeros(output_dims,
                                          dtype=dtype,
                                          device=self._device)
            self.output_tensors.append(output_tensor)

            # Index range used later to gather this chunk from the input.
            index = creator.arange(split_start,
                                   split_start + split,
                                   dtype=torch.long,
                                   device=self._device)

            self._indices.append(index)
            split_start += split
def _arange_non_divisible(creator: TensorCreator):
    """Build an arange whose step does not land exactly on the endpoint.

    Produces the values ``[0, 2]``: range ``[0, 4)`` walked in steps of 2.
    """
    start, stop, stride = 0, 4, 2
    return creator.arange(start, end=stop, step=stride)