Example #1
0
    def __init__(self,
                 creator: TensorCreator,
                 lower_bound: int,
                 upper_bound: int,
                 random: np.random.RandomState,
                 generate_new_every_n: int = 1,
                 generate_random_intervals: bool = False):
        """Initialise the unit: store generation parameters and allocate outputs.

        Args:
            creator: allocator for the output tensors; also provides the device.
            lower_bound: lower bound for the generated values
                (NOTE(review): inclusivity not visible here — confirm in step logic).
            upper_bound: upper bound for the generated values; also the length
                of the one-hot output tensor.
            random: random number generator (annotation tightened from
                ``np.random`` — the module is not a type; Example elsewhere in
                this file uses ``np.random.RandomState`` for the same role).
            generate_new_every_n: generation interval, stored below; exact
                stepping semantics live outside this block.
            generate_random_intervals: if True, generation intervals are
                randomised (presumed from the name — confirm against usage).
        """
        super().__init__(creator.device)

        self._generate_new_every_n = generate_new_every_n
        # Countdown/threshold for the next generation starts at the interval.
        self._next_generation = generate_new_every_n
        self._step = -1
        self._lower_bound = lower_bound
        self._upper_bound = upper_bound
        self._random = random
        self._random_generation_intervals = generate_random_intervals

        # Scalar output: a single NaN-initialised value in the unit's float dtype.
        self._scalar_output = creator.full([1],
                                           fill_value=FLOAT_NAN,
                                           dtype=self._float_dtype,
                                           device=self._device)
        # One-hot output: one slot per possible value up to upper_bound,
        # NaN-initialised until the first generation.
        self._one_hot_output = creator.full([self._upper_bound],
                                            fill_value=FLOAT_NAN,
                                            dtype=self._float_dtype,
                                            device=self._device)

        # The currently generated value as a shape-(1,) long tensor.
        self._current_value = creator.zeros((1, ), dtype=creator.long)
Example #2
0
    def __init__(self, creator: TensorCreator, tensor_shape: Tuple):
        """Allocate the two (odd/even) NaN-filled tensors and reset the write counter."""

        def allocate_nan_tensor():
            # One fresh NaN-filled tensor of the requested shape, on the
            # creator's device, in the creator's float dtype.
            return creator.full(tensor_shape,
                                FLOAT_NAN,
                                device=creator.device,
                                dtype=creator.float)

        self._odd_tensor = allocate_nan_tensor()
        self._even_tensor = allocate_nan_tensor()

        # No writes have happened yet.
        self._num_writes = 0
Example #3
0
    def __init__(self, params: ExpertParams, creator: TensorCreator):
        """Initialise the unit: cache flock parameters and allocate the
        context output and index tensors.

        Args:
            params: expert parameters; flock size, cluster-center count,
                lookbehind and provider count are read from it below.
            creator: allocator for the tensors; also provides the device.
        """
        super().__init__(creator.device)
        float_dtype = get_float(self._device)

        self.params = params
        flock_size = params.flock_size
        self.n_cluster_centers = params.n_cluster_centers
        self.seq_lookbehind = params.temporal.seq_lookbehind
        # self.context_size = self.n_cluster_centers * 2
        self.n_providers = self.params.temporal.n_providers

        # Context is: <SP_output>
        #             <Rewards>
        #             <Punishments>
        #
        #             <Pred_clusters for next step>
        #             <NaNs>
        #             <NaNs>

        # With optional NaN Padding depending on the context size in the params.
        # Shape: (flock_size, 2, NUMBER_OF_CONTEXT_TYPES, n_cluster_centers),
        # NaN-filled until the first forward pass writes into it.
        self.output_context = creator.full(
            (flock_size, 2, NUMBER_OF_CONTEXT_TYPES, self.n_cluster_centers),
            fill_value=float("nan"),
            device=self._device,
            dtype=float_dtype)

        # Column vector 0..flock_size-1 broadcast across cluster centers,
        # i.e. shape (flock_size, n_cluster_centers); presumably used for
        # advanced (per-expert) indexing — confirm against usage.
        self.index_tensor = creator.arange(
            start=0, end=flock_size,
            device=self._device).view(-1, 1).expand(flock_size,
                                                    self.n_cluster_centers)

        self.create_flocks(params, creator)
Example #4
0
    def __init__(self, creator: TensorCreator, params: DatasetSENavigationParams, random: np.random.RandomState):
        """Initialise the SE-navigation dataset unit: load the task data,
        precompute landmarks and allocate the per-step output tensors.

        Args:
            creator: allocator for the output tensors; also provides the device.
            params: dataset parameters (cloned, so later external mutation of
                the caller's object does not affect this unit).
            random: random number generator used for sampling.
        """
        super().__init__(creator.device)
        self._params = params.clone()
        self._random = random

        dataset = DatasetSeTask1(self._params.dataset_size)
        size, images, labels = dataset.get_all()
        self._img_size = size
        self._n_samples = len(images)
        self._positions_permuted = None

        # load the images and positions
        self._images = images.type(self._float_dtype).to(self._device)

        # Positions scaled by 1/100 — presumably to normalise label units into
        # a [0, 1]-ish range; confirm against DatasetSeTask1's label units.
        self._positions = labels.type(self._float_dtype).to(self._device) / 100.0

        # compute landmarks for each position
        divisor = SpaceDivisor(self._params.horizontal_segments,
                               self._params.vertical_segments,
                               self._device)
        self._landmarks, self._landmarks_one_hot = divisor.get_landmarks(self._positions)

        # prepare the tensors for the first step
        if self._params.channel_first:
            # NHWC -> NCHW for channel-first consumers.
            self._images = self._images.permute(0, 3, 1, 2)
            img_size = [DatasetSeBase.N_CHANNELS, *self._params.dataset_dims]
        else:
            img_size = [*self._params.dataset_dims, DatasetSeBase.N_CHANNELS]

        # All "last_*" outputs start NaN-filled until the first step fills them.
        self.last_image = creator.full(img_size,
                                       fill_value=FLOAT_NAN,
                                       dtype=self._float_dtype,
                                       device=self._device)

        self.last_position = creator.full(self._positions[0].shape,
                                          fill_value=FLOAT_NAN,
                                          dtype=self._float_dtype,
                                          device=self._device)

        self.last_landmark = creator.full(self._landmarks[0].shape,
                                          fill_value=FLOAT_NAN,
                                          dtype=self._float_dtype,
                                          device=self._device)
        self.last_landmark_one_hot = creator.full(self._landmarks_one_hot[0].shape,
                                                  fill_value=FLOAT_NAN,
                                                  dtype=self._float_dtype,
                                                  device=self._device)

        # Constant outputs: task id 1, "not testing" indicator 0, and a
        # -inf marker for the unused output slot.
        self.task_id_const = creator.full([1], 1.0, dtype=self._float_dtype, device=self._device)
        self.testing_phase_indicator_const = creator.full([1], 0.0, dtype=self._float_dtype, device=self._device)
        self.unused_output = creator.full([1], FLOAT_NEG_INF, dtype=self._float_dtype, device=self._device)

        # Sample cursors start at -1, i.e. before the first sample.
        self._pos = -1
        self._pos_permuted = -1
    def __init__(self, creator: TensorCreator, params: DatasetSeObjectsParams,
                 random):
        """Initialise the SE-objects dataset unit: load train/test data and
        allocate the per-step output tensors.

        Args:
            creator: allocator for the output tensors; also provides the device.
            params: dataset parameters; ``save_gpu_memory`` keeps the (large)
                image tensors on CPU instead of the unit's device.
            random: random number generator used for sampling.
        """
        super().__init__(creator.device)
        self._params = params
        self._random = random
        self._save_memory = params.save_gpu_memory

        dataset = DatasetSeTask0(self._params.dataset_size)
        size, train, test = dataset.get_all()

        # Instance ids stay on CPU — used for bookkeeping, not for compute.
        self._train_instance_ids = train[2].to('cpu')
        self._test_instance_ids = test[2].to('cpu')

        self._img_size = size

        # Load the images and labels. Images optionally stay on CPU to save
        # GPU memory; labels always live on the unit's device.
        if self._save_memory:
            device = 'cpu'
        else:
            device = self._device
        self._train_images = train[0].to(device)
        self._train_labels = train[1].to(self._device)
        self._test_images = test[0].to(device)
        self._test_labels = test[1].to(self._device)

        # prepare the tensors for the first step
        img_size = [*self._params.dataset_dims, DatasetSeBase.N_CHANNELS]
        num_classes = self._train_labels.shape[1]

        # All "last_*" outputs start NaN-filled until the first step fills them.
        self.last_image = self._create_tensor(img_size, FLOAT_NAN, creator)
        self.last_label = self._create_tensor([num_classes], FLOAT_NAN,
                                              creator)
        self.last_truth = self._create_tensor([num_classes], FLOAT_NAN,
                                              creator)
        self.hidden_label = self._create_tensor([num_classes], FLOAT_NAN,
                                                creator)
        self.last_instance_id = self._create_tensor([1], FLOAT_NAN, creator)

        self.task_id_const = self._create_tensor([1], 0.0, creator)
        self.unused_output = self._create_tensor([1], FLOAT_NEG_INF, creator)
        self.testing_phase_indicator = self._create_tensor([1], FLOAT_NAN,
                                                           creator)

        # Position in the training set; -1 means "before the first sample".
        # Dtype requested via the creator (creator.int64 == torch.long) for
        # consistency with the rest of the file, which never asks torch for
        # dtypes directly when a creator is available.
        self.training_pos = creator.full([1],
                                         fill_value=-1,
                                         dtype=creator.int64,
                                         device=self._device)

        self._reset_indexes_and_filtering()
Example #6
0
 def _create_tensor(self, sizes, creator: TensorCreator, fill_value: float = None):
     """Allocate a tensor of the given sizes on this unit's device and float dtype.

     Args:
         sizes: shape of the tensor to allocate.
         creator: TensorCreator performing the allocation.
         fill_value: value to fill with; ``None`` (the default) means
             FLOAT_NAN, preserving the original hard-coded behaviour.
             (A ``None`` sentinel is used instead of ``FLOAT_NAN`` as the
             default so the constant is only resolved at call time.)

     Returns:
         The newly allocated, filled tensor.
     """
     if fill_value is None:
         fill_value = FLOAT_NAN
     return creator.full(sizes,
                         fill_value=fill_value,
                         dtype=self._float_dtype,
                         device=self._device)
Example #7
0
def _full(creator: TensorCreator):
    return creator.full((2, 3, 1, 1), fill_value=-3)
Example #8
0
    def __init__(self, params: ExpertParams, creator: TensorCreator = torch):
        """Initialises the flock.

        Args:
            params (ExpertParams): The container for the parameters which will be used for this flock
            creator (TensorCreator): The creator which will allocate the tensors.
                Defaults to ``torch`` (consistent with the convolutional
                variant); the previous default of ``None`` crashed immediately
                on ``creator.device`` below.
        """

        super().__init__(params, creator.device)

        self.n_cluster_centers = params.n_cluster_centers
        self.flock_size = params.flock_size
        self.enable_learning = params.spatial.enable_learning
        float_dtype = get_float(self._device)

        # Cache the spatial-pooler parameters on the instance.
        sp_params = params.spatial
        self.input_size = sp_params.input_size
        self.buffer_size = sp_params.buffer_size
        self.batch_size = sp_params.batch_size
        self.learning_period = sp_params.learning_period
        self.max_boost_time = sp_params.max_boost_time
        self.cluster_boost_threshold = sp_params.cluster_boost_threshold
        self.learning_rate = sp_params.learning_rate
        self._boost = sp_params.boost
        self._sampling_method = sp_params.sampling_method

        # Buffer the experts store observations in and learn from.
        self.buffer = SPFlockBuffer(creator=creator,
                                    flock_size=self.flock_size,
                                    buffer_size=self.buffer_size,
                                    input_size=self.input_size,
                                    n_cluster_centers=self.n_cluster_centers)

        # The initial clusters are randomised: allocated as zeros here, then
        # filled by initialize_cluster_centers() below.
        self.cluster_centers = creator.zeros(
            (self.flock_size, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.initialize_cluster_centers()

        # For keeping track of which clusters are being boosted and for how
        # long; starts at the threshold value.
        self.cluster_boosting_durations = creator.full(
            (self.flock_size, self.n_cluster_centers),
            fill_value=self.cluster_boost_threshold,
            device=self._device,
            dtype=creator.int64)

        self.prev_boosted_clusters = creator.zeros(
            (self.flock_size, self.n_cluster_centers),
            device=self._device,
            dtype=creator.uint8)

        # For holding the targets and deltas of the cluster centers
        self.cluster_center_targets = creator.zeros(
            (self.flock_size, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.cluster_center_deltas = creator.zeros(
            (self.flock_size, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.boosting_targets = creator.zeros(
            (self.flock_size, self.n_cluster_centers),
            device=self._device,
            dtype=creator.int64)
        self.tmp_boosting_targets = creator.zeros(
            (self.flock_size, self.n_cluster_centers),
            device=self._device,
            dtype=creator.int64)

        # Output tensor of cluster center vectors into which to integrate the forward pass stuff
        self.forward_clusters = creator.zeros(
            (self.flock_size, self.n_cluster_centers),
            device=self._device,
            dtype=float_dtype)

        self.predicted_clusters = creator.zeros(
            (self.flock_size, self.n_cluster_centers),
            device=self._device,
            dtype=float_dtype)

        self.current_reconstructed_input = creator.zeros(
            (self.flock_size, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.predicted_reconstructed_input = creator.zeros(
            (self.flock_size, self.input_size),
            device=self._device,
            dtype=float_dtype)

        # How many times did the spatial pooler forward and learning process run
        self.execution_counter_forward = creator.zeros((self.flock_size, 1),
                                                       device=self._device,
                                                       dtype=creator.int64)
        self.execution_counter_learning = creator.zeros((self.flock_size, 1),
                                                        device=self._device,
                                                        dtype=creator.int64)
Example #9
0
    def __init__(self, params: ExpertParams, creator: TensorCreator = torch):
        """Initialises the convolutional flock.

        Args:
            params: parameters of the flock, see ExpertParams for default values.
            creator: the creator which will allocate the tensors (defaults to ``torch``).
        """
        super_params = params.clone()
        # The base class allocates per-expert buffers; those are just internal,
        # size-1 buffers local to each expert — experts do not learn from them.
        super_params.spatial.buffer_size = 1
        super().__init__(super_params, creator)

        super()._validate_universal_params(params)
        self._validate_conv_learning_params(params)

        float_dtype = get_float(self._device)

        # Common buffer where each flock stores data and from which they learn.
        # NOTE: this deliberately uses the caller's requested buffer_size from
        # `params`, not the size-1 value forced into `super_params` above.
        self.common_buffer = SPFlockBuffer(
            creator=creator,
            flock_size=1,
            buffer_size=params.spatial.buffer_size,
            input_size=self.input_size,
            n_cluster_centers=self.n_cluster_centers)

        # The initial clusters are randomised (single shared set, flock_size 1).
        self.common_cluster_centers = creator.randn(
            (1, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)

        # Virtual replications of the common cluster centers, mainly for observation purposes.
        # expand() creates a view — all experts share the same underlying storage.
        self.cluster_centers = self.common_cluster_centers.expand(
            self.flock_size, self.n_cluster_centers, self.input_size)

        # For keeping track of which clusters are being boosted and for how long
        self.cluster_boosting_durations = creator.full(
            (1, self.n_cluster_centers),
            fill_value=self.cluster_boost_threshold,
            device=self._device,
            dtype=creator.int64)

        self.prev_boosted_clusters = creator.zeros((1, self.n_cluster_centers),
                                                   device=self._device,
                                                   dtype=creator.uint8)

        # For holding the targets and deltas of the cluster centers
        self.cluster_center_targets = creator.zeros(
            (1, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.cluster_center_deltas = creator.zeros(
            (1, self.n_cluster_centers, self.input_size),
            device=self._device,
            dtype=float_dtype)
        self.boosting_targets = creator.zeros((1, self.n_cluster_centers),
                                              device=self._device,
                                              dtype=creator.int64)
        self.tmp_boosting_targets = creator.zeros((1, self.n_cluster_centers),
                                                  device=self._device,
                                                  dtype=creator.int64)

        # There only needs to be one learning counter as only one expert learns in the convolutional case,
        # but we need the expanded version for cluster observer.
        self.common_execution_counter_learning = creator.zeros(
            (1, 1), device=self._device, dtype=creator.int64)
        self.execution_counter_learning = self.common_execution_counter_learning.expand(
            self.flock_size, 1)
Example #10
0
 def _create_tensor(self, shape: Union[List[int], torch.Size],
                    fill_value: float, creator: TensorCreator):
     """Allocate a tensor of the given shape, filled with `fill_value`,
     using this unit's float dtype and device.
     """
     # The dtype/device always come from the unit itself; only shape and
     # fill value vary per call.
     allocation_kwargs = dict(fill_value=fill_value,
                              dtype=self._float_dtype,
                              device=self._device)
     return creator.full(shape, **allocation_kwargs)
Example #11
0
 def __init__(self, creator: TensorCreator, shape, constant):
     """Allocate the constant-filled output tensor on the creator's device."""
     target_device = creator.device
     super().__init__(target_device)
     # The output never changes after init: one tensor of `shape`, every
     # element equal to `constant`, in the unit's float dtype.
     self.output = creator.full(shape,
                                fill_value=constant,
                                dtype=self._float_dtype,
                                device=target_device)