class GradualLearningBasicTopology(Topology):
    """
    Long words utilizing context

    Interesting observers:
      gate:
        * SP Learn Process - Data Batch, sum over dim 1 (zero values mean the sequence is not present in the batch)
        * SP cluster centers
        * SP output forward clusters
      specialist:
        * SP_frequent_seqs_reconstruction - symbol reconstruction
        * TP_frequent_context_likelihood - shows the context for each symbol in the learnt sequences (items per row: 2)
        * TP_seq_likelihoods_by_cluster
    """
    _n_accuracy_2: AccuracyNode
    _n_accuracy_1: AccuracyNode
    _n_accuracy_single_2: AccuracyNode
    _n_accuracy_single_1: AccuracyNode
    _n_dataset_switch: DatasetSwitchNodeGroup
    _n_specialist: SpecialistNodeGroup
    _prop_builder: ObserverPropertiesBuilder
    _step_count: int = 0
    _active_dataset: int = 0

    def __init__(self, params: GradualLearningBasicTopologyParams = GradualLearningBasicTopologyParams()):
        super().__init__('cuda')
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

        self._params = params
        self.create_topology()

    @property
    def params(self):
        return self._params

    def create_topology(self):
        """
                                        +----------------+
            +-------------+             | dataset_switch |
            |             |             +--+-----+-------+
            |             v                |     |
            |  +----------+------------+   |     |
            |  | context_feedback_pass |   |     |
            |  +--------------------+--+   |     |
            |                       |      |     |
            |                       v      v     |
            |               +-------+------+--+  |
            |               | gate_input_join |  |
            |               +-------+---------+  |
            |                       |            |
            |                       v            |
            |              +--------+---------+  |
            |              | gate_input_noise |  |
            |              +--------+---------+  |
            |                       |            |
            |                       v            |
            |                   +---+--+         |
            |                   | gate |         |
            |                   +---+--+         |
            |                       |            |
            |                       v            |
            |               +-------+--------+   +--------+
            |               | format_context |   |        |
            |               +-------+--------+   |        |
            |                       |            v        |
            |                       |     +------+-----+  |
            |                       +---->+ specialist |  |
            |                             +--+--------++  |
            |                                |        |   |
            +--------------------------------+        v   v
                                                   ++--------++
                                                   | accuracy |
                                                   +----------+
        """

        n_gate = SpatialPoolerFlockNode(
            ExpertParams(flock_size=self._params.flock_size,
                         n_cluster_centers=self._params.seq_count,
                         spatial=SpatialPoolerParams(
                             # input_size=3,
                             enable_learning=True,
                             buffer_size=self._params.gate_buffer_size,
                             batch_size=100,
                             learning_rate=0.2,
                             learning_period=10,
                             cluster_boost_threshold=100,
                             max_boost_time=200
                         ),
                         ),
            name="Gate"
        )
        self.add_node(n_gate)

        # Specialist
        n_specialist = SpecialistNodeGroup(SpecialistNodeGroupParams(
            flock_size=self._params.flock_size,
            n_symbols=len(self._params.symbols),
            gate_input_context_multiplier=self._params.gate_input_context_multiplier,
            gate_input_context_avg_window_size=self._params.gate_input_context_avg_window_size,
            seq_count=self._params.seq_count,
            convert_context_to_one_hot=self._params.convert_context_to_one_hot
        ))
        self.add_node(n_specialist)
        self._n_specialist = n_specialist

        n_context_feedback_pass = PassNode((self._params.flock_size, self._params.seq_count))
        n_gate_input_join = JoinNode(dim=1, n_inputs=2)
        n_gate_input_noise = RandomNoiseNode(RandomNoiseParams(amplitude=0.0001))
        n_format_context = SPFormatContextNodeGroup(self._params.seq_count, self._params.flock_size)

        self.add_node(n_context_feedback_pass)
        self.add_node(n_gate_input_join)
        self.add_node(n_gate_input_noise)
        self.add_node(n_format_context)

        # Dataset
        n_dataset_switch = DatasetSwitchNodeGroup(DatasetSwitchNodeGroupParams(
            dataset_params=DatasetAlphabetNodeGroupParams(
                flock_size=self._params.flock_size,
                symbols=self._params.symbols,
                seq_length=self._params.seq_length,
                seq_count=self._params.seq_count,
                seq_repeat=self._params.seq_repeat
            ),
            flock_split=self._params.flock_split
        ))

        self._n_dataset_switch = n_dataset_switch
        self.add_node(n_dataset_switch)

        # dataset to specialist
        Connector.connect(n_dataset_switch.outputs.output, n_specialist.inputs.input)
        # specialist to gate
        Connector.connect(n_specialist.outputs.context_feedback, n_context_feedback_pass.inputs.input, is_backward=True)
        Connector.connect(n_context_feedback_pass.outputs.output, n_gate_input_join.inputs[0])
        # dataset to gate
        Connector.connect(n_dataset_switch.outputs.sequence_id_one_hot, n_gate_input_join.inputs[1])
        Connector.connect(n_gate_input_join.outputs.output, n_gate_input_noise.inputs.input)
        Connector.connect(n_gate_input_noise.outputs.output, n_gate.inputs.sp.data_input)
        # gate to specialist
        Connector.connect(n_gate.outputs.sp.forward_clusters, n_format_context.inputs.input)
        Connector.connect(n_format_context.outputs.output, n_specialist.inputs.context_input)
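
        # A note on the cycle above (my reading, hedged): the specialist ->
        # gate context feedback edge is marked is_backward=True, which
        # presumably makes the gate consume the feedback produced in the
        # previous step - that is what breaks the loop in the graph.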

        # Measuring accuracy
        # Fork
        n_fork_dataset = ForkNode(0, [self._params.flock_split, self._params.flock_size - self._params.flock_split])
        n_fork_prediction = ForkNode(0, [self._params.flock_split, self._params.flock_size - self._params.flock_split])
        self.add_node(n_fork_dataset)
        self.add_node(n_fork_prediction)
        Connector.connect(n_dataset_switch.outputs.output, n_fork_dataset.inputs.input)
        Connector.connect(n_specialist.outputs.output, n_fork_prediction.inputs.input)

        self._n_accuracy_single_1 = AccuracyNode(1, name='Accuracy single 1')
        self.add_node(self._n_accuracy_single_1)
        Connector.connect(n_fork_dataset.outputs[0], self._n_accuracy_single_1.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[0], self._n_accuracy_single_1.inputs.input_b)

        self._n_accuracy_single_2 = AccuracyNode(1, name='Accuracy single 2')
        self.add_node(self._n_accuracy_single_2)
        Connector.connect(n_fork_dataset.outputs[1], self._n_accuracy_single_2.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[1], self._n_accuracy_single_2.inputs.input_b)

        self._n_accuracy_1 = AccuracyNode(self._params.accuracy_average_steps, name='Accuracy 1')
        self.add_node(self._n_accuracy_1)
        Connector.connect(n_fork_dataset.outputs[0], self._n_accuracy_1.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[0], self._n_accuracy_1.inputs.input_b)

        self._n_accuracy_2 = AccuracyNode(self._params.accuracy_average_steps, name='Accuracy 2')
        self.add_node(self._n_accuracy_2)
        Connector.connect(n_fork_dataset.outputs[1], self._n_accuracy_2.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[1], self._n_accuracy_2.inputs.input_b)

    def init_sp_clusters(self):
        self._n_dataset_switch.init_sp_clusters()
        self._n_specialist.init_sp_clusters()

    def set_sequences_filter(self, dataset_id: int, enabled_sequences: List[bool]):
        self._n_dataset_switch.set_sequences_filter(dataset_id, enabled_sequences)
        logger.info(f'sequence filter: {enabled_sequences}, step: {self._step_count}')

    @property
    def active_dataset(self) -> int:
        return self._active_dataset

    @active_dataset.setter
    def active_dataset(self, value: int):
        self._active_dataset = value
        self._n_dataset_switch.select_dataset(value)
        logger.info(f'active dataset: {value}, step: {self._step_count}')

    def get_properties(self) -> List[ObserverPropertiesItem]:
        props = super().get_properties()
        return props + [
            self._prop_builder.collapsible_header('Experiment', True),
            self._prop_builder.auto("Active dataset", type(self).active_dataset),
            *self._dataset_control_buttons(0),
            *self._dataset_control_buttons(1)
        ]

    def _dataset_control_buttons(self, dataset_id: int) -> List[ObserverPropertiesItem]:
        patterns = [
            [False, False, False] * 2,
            [True, False, False] * 2,
            [False, True, False] * 2,
            [False, False, True] * 2,
            [True, True, False] * 2,
            [False, True, True] * 2,
            [True, False, True] * 2,
            [True, True, True] * 2,
            [True, True, True, True, True, False],
        ]
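
        # Each pattern holds one enable flag per sequence (six in total); the
        # 3-element patterns are tiled twice, so the first and second triple of
        # sequences receive the same filter.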

        def format_pattern(pattern: List[bool]) -> str:
            return "".join(['1' if p else '0' for p in pattern])

        return [
            self._prop_builder.button(f'Dataset {dataset_id} - {format_pattern(p)}',
                                      partial(self.set_sequences_filter, dataset_id, p))
            for p in patterns
        ]

    def get_accuracy_single_1(self) -> float:
        return self._n_accuracy_single_1.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_single_1(self) -> List[float]:
        return self._n_accuracy_single_1.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_1(self) -> float:
        return self._n_accuracy_1.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_1(self) -> List[float]:
        return self._n_accuracy_1.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_single_2(self) -> float:
        return self._n_accuracy_single_2.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_single_2(self) -> List[float]:
        return self._n_accuracy_single_2.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_2(self) -> float:
        return self._n_accuracy_2.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_2(self) -> List[float]:
        return self._n_accuracy_2.outputs.accuracy_per_flock.tensor.tolist()

    def get_actual_sequence_ids(self) -> List[int]:
        return self._n_dataset_switch.outputs.dataset_2_scalar_sequence_ids.tensor.tolist()

    def step(self):
        super().step()
        self._step_count += 1
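
# A minimal usage sketch (hedged: parameter values and the step count are
# illustrative, not taken from an experiment; all names below exist in this
# class):
#
#   topology = GradualLearningBasicTopology()
#   topology.init_sp_clusters()
#   topology.active_dataset = 0
#   topology.set_sequences_filter(0, [True, False, True] * 2)
#   for _ in range(1000):
#       topology.step()
#   print(topology.get_accuracy_1(), topology.get_accuracy_per_flock_1())
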
class TemporalPoolerParamsProps(Initializable, ABC):
    _flock: TPFlock
    _prop_builder: ObserverPropertiesBuilder
    _params: TemporalPoolerParams

    def __init__(self, params: TemporalPoolerParams, flock: TPFlock):
        self._flock = flock
        self._params = params
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

    def is_initialized(self) -> bool:
        return self._flock is not None

    @property
    def own_rewards_weight(self) -> float:
        return self._params.own_rewards_weight

    @own_rewards_weight.setter
    def own_rewards_weight(self, value: float):
        validate_positive_with_zero_float(value)
        self._params.own_rewards_weight = value

    @property
    def incoming_context_size(self) -> int:
        return self._params.incoming_context_size

    @incoming_context_size.setter
    def incoming_context_size(self, value: int):
        validate_positive_with_zero_int(value)
        self._params.incoming_context_size = value

    @property
    def buffer_size(self) -> int:
        return self._params.buffer_size

    @buffer_size.setter
    def buffer_size(self, value: int):
        validate_positive_int(value)
        if value < self.batch_size:
            raise FailedValidationException('buffer_size must be greater than or equal to batch_size')
        self._params.buffer_size = value

    @property
    def batch_size(self) -> int:
        return self._params.batch_size

    @batch_size.setter
    def batch_size(self, value: int):
        validate_positive_int(value)
        if value > self.buffer_size:
            raise FailedValidationException('batch_size must be less than or equal to buffer_size')
        self._params.batch_size = value

    @property
    def learning_period(self) -> int:
        return self._params.learning_period

    @learning_period.setter
    def learning_period(self, value: int):
        validate_positive_int(value)
        self._params.learning_period = value
        if self._flock is not None:
            self._flock.learning_period = value
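
    # The setters in this class follow one pattern (an observation, not new
    # behavior): validate the value, persist it to self._params, and, for
    # parameters the flock can change at runtime, push it to the live flock
    # as well, e.g. `props.learning_period = 50`.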

    @property
    def enable_learning(self) -> bool:
        return self._params.enable_learning

    @enable_learning.setter
    def enable_learning(self, value: bool):
        self._params.enable_learning = value
        if self._flock is not None:
            self._flock.enable_learning = value

    @property
    def seq_length(self) -> int:
        return self._params.seq_length

    @seq_length.setter
    def seq_length(self, value: int):
        validate_positive_int(value)
        self._params.seq_length = value

    @property
    def seq_lookahead(self) -> int:
        return self._params.seq_lookahead

    @seq_lookahead.setter
    def seq_lookahead(self, value: int):
        validate_positive_int(value)
        self._params.seq_lookahead = value

    @property
    def n_frequent_seqs(self) -> int:
        return self._params.n_frequent_seqs

    @n_frequent_seqs.setter
    def n_frequent_seqs(self, value: int):
        validate_positive_int(value)
        self._params.n_frequent_seqs = value

    @property
    def max_encountered_seqs(self) -> int:
        return self._params.max_encountered_seqs

    @max_encountered_seqs.setter
    def max_encountered_seqs(self, value: int):
        validate_positive_int(value)
        self._params.max_encountered_seqs = value

    @property
    def forgetting_limit(self) -> int:
        return self._params.forgetting_limit

    @forgetting_limit.setter
    def forgetting_limit(self, value: int):
        validate_positive_int(value)
        if self.is_initialized():
            self._flock.forgetting_limit = value
        self._params.forgetting_limit = value

    @property
    def context_prior(self) -> float:
        return self._params.context_prior

    @context_prior.setter
    def context_prior(self, value: float):
        validate_positive_float(value)
        self._params.context_prior = value

    @property
    def exploration_attempts_prior(self) -> float:
        return self._params.exploration_attempts_prior

    @exploration_attempts_prior.setter
    def exploration_attempts_prior(self, value: float):
        validate_positive_float(value)
        self._params.exploration_attempts_prior = value

    @property
    def exploration_probability(self) -> float:
        return self._params.exploration_probability

    @exploration_probability.setter
    def exploration_probability(self, value: float):
        validate_positive_with_zero_float(value)
        self._params.exploration_probability = value
        if self.is_initialized():
            self._flock.exploration_probability = value

    @property
    def output_projection_persistence(self) -> float:
        return self._params.output_projection_persistence

    @output_projection_persistence.setter
    def output_projection_persistence(self, value: float):
        validate_float_in_range(value, 0, 1)
        self._params.output_projection_persistence = value

    @property
    def follow_goals(self) -> bool:
        return self._params.follow_goals

    @follow_goals.setter
    def follow_goals(self, value: bool):
        self._params.follow_goals = value
        if self.is_initialized():
            self._flock.follow_goals = value

    def reset_learnt_sequences(self):
        self._flock.reset_learnt_sequences()

    @property
    def compute_backward_pass(self) -> bool:
        return self._params.compute_backward_pass

    @compute_backward_pass.setter
    def compute_backward_pass(self, value: bool):
        self._params.compute_backward_pass = value
        if self.is_initialized():
            self._flock.compute_backward_pass = value

    @property
    def compute_best_matching_context(self) -> bool:
        return self._params.compute_best_matching_context

    @compute_best_matching_context.setter
    def compute_best_matching_context(self, value: bool):
        self._params.compute_best_matching_context = value
        if self.is_initialized():
            self._flock.compute_best_matching_context = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.button('TP_reset_learnt_sequences', self.reset_learnt_sequences),
            self._prop_builder.auto('TP_incoming_context_size', type(self).incoming_context_size,
                                    edit_strategy=disable_on_runtime,
                                    hint='Size of the context input without the two elements for reward'),
            self._prop_builder.auto('TP_buffer_size', type(self).buffer_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the TP buffer - how many consecutive steps are stored'),
            self._prop_builder.auto('TP_batch_size', type(self).batch_size, edit_strategy=disable_on_runtime,
                                    hint="How large is the batch 'sampled' from the buffer - in the case of TP "
                                         "the batch always contains the last X entries"),
            self._prop_builder.auto('TP_learning_period', type(self).learning_period,
                                    hint='How often does the learning of TP run (every Xth step of the TP)'),
            self._prop_builder.auto('TP_enable_learning', type(self).enable_learning, hint='TP learning is enabled'),
            self._prop_builder.auto('TP_seq_length', type(self).seq_length, edit_strategy=disable_on_runtime,
                                    hint='Length of the sequences considered in the TP; it equals lookbehind + lookahead'),
            self._prop_builder.auto('TP_seq_lookahead', type(self).seq_lookahead, edit_strategy=disable_on_runtime,
                                    hint='How large part of the sequence is lookahead (rest is lookbehind including '
                                         'the current cluster)'),
            self._prop_builder.auto('TP_n_frequent_seqs', type(self).n_frequent_seqs, edit_strategy=disable_on_runtime,
                                    hint='How many of the sequences from max_encountered_seqs are used in the forward '
                                         'and backward processes. Only the X most frequent ones.'),
            self._prop_builder.auto('TP_max_encountered_seqs', type(self).max_encountered_seqs,
                                    edit_strategy=disable_on_runtime,
                                    hint='How many sequences does the TP know. Their statistics are updated during '
                                         'learning. If the TP encounters more sequences, it forgets the least frequent ones.'),
            self._prop_builder.auto('TP_forgetting_limit', type(self).forgetting_limit,
                                    hint='Value influencing how fast the old knowledge in the TP is replaced by new '
                                         'knowledge. When adding new knowledge, it compresses the old knowledge into X steps. '
                                         'This corresponds to exponential decay with factor 1/X.'),
            self._prop_builder.auto('TP_context_prior', type(self).context_prior, edit_strategy=disable_on_runtime,
                                    hint='What is the prior probability of seeing any new sequence in any context. '
                                         'This eliminates too extreme judgments based on only a few data points. '
                                         'It should not normally be changed.'),
            self._prop_builder.auto('TP_exploration_attempts_prior', type(self).exploration_attempts_prior,
                                    edit_strategy=disable_on_runtime,
                                    hint='Similar to the context_prior, but for exploration.'),
            self._prop_builder.auto('TP_exploration_probability', type(self).exploration_probability,
                                    hint='With this probability, the expert will be exploring instead of trying to '
                                         'fulfill goals.'),
            self._prop_builder.auto('TP_follow_goals', type(self).follow_goals,
                                    hint='Should the expert fulfill the goals rather than just doing what it '
                                         'predicts will happen (trying to actively fulfill what the passive model '
                                         'predicts). True means that it tries to fulfill the goals.'),

            self._prop_builder.auto('TP_output_projection_persistence', type(self).output_projection_persistence,
                                    hint='This decays output_projection values in time (less event-driven behavior): '
                                         'multiply output_projection by this number, compute new values of '
                                         'output_projection for experts that changed their inputs, '
                                         'and set their values in the output_projection.'),

            self._prop_builder.auto("TP_own_rewards_weight", type(self).own_rewards_weight),
            self._prop_builder.auto('TP_compute_backward_pass', type(self).compute_backward_pass,
                                    hint='Should the active inference (goal-directed behavior, actions) be computed. '
                                         'If not needed, disabling this can speed up the computation'),
            self._prop_builder.auto('TP_compute_best_matching_context', type(self).compute_best_matching_context,
                                    hint='When set to true, the internal predicted_clusters_by_context and the output best_matching_context are computed'),
        ]
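
# Usage sketch (hedged: the construction of TemporalPoolerParams and TPFlock is
# elided here; any concrete instances from this codebase would do):
#
#   props = TemporalPoolerParamsProps(params=tp_params, flock=tp_flock)
#   items = props.get_properties()   # List[ObserverPropertiesItem] for the UI
#   props.batch_size = 200           # validated against buffer_size
#   props.enable_learning = False    # persisted and pushed to the live flock
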
class ObserverView(PropertiesObservable):
    """A node that encompasses all the model's observables and passes them on to the observer system."""
    _strip_observer_name_prefix: str

    _observables: Dict[str, Observable]
    _first_show: bool = True

    def __init__(self,
                 name: str,
                 observer_system: ObserverSystem,
                 strip_observer_name_prefix: str = ''):
        self._strip_observer_name_prefix = strip_observer_name_prefix
        self.name = name
        self._observer_system = observer_system
        self._observables = {}
        observer_system.signals.window_closed.connect(self.on_window_closed)
        self._prop_builder = ObserverPropertiesBuilder(self)

    def _persist(self):
        self._observer_system.persist_observer_values(self.name, self)

    def on_window_closed(self, observer_name: str):
        if observer_name in self._observables:
            self._observer_system.unregister_observer(observer_name, False)
            self._persist()

    def close(self):
        self._unregister_observers()
        self._observer_system.unregister_observer(self.name, True)

    def set_observables(self, observables: Dict[str, Observable]):
        self._unregister_observers()
        self._observables = observables
        # default is no observers visible
        # self._register_observers()
        if self._first_show:
            self._observer_system.register_observer(self.name, self)
            self._first_show = False

    def _register_observers(self):
        for name, observable in self._observables.items():
            self._observer_system.register_observer(name, observable)

    def _unregister_observers(self):
        for name in self._observables.keys():
            self._observer_system.unregister_observer(name, True)

    def get_properties(self) -> List[ObserverPropertiesItem]:
        def enable_observers_handler(prop_name: str, value: bool):
            if value:
                logger.debug(f"Register observer {prop_name}")
                self._observer_system.register_observer(
                    prop_name, self._observables[prop_name])
            else:
                logger.debug(f"Unregister observer {prop_name}")
                self._observer_system.unregister_observer(prop_name, True)

        def remove_prefix(text: str, prefix: str):
            if text.startswith(prefix):
                return text[len(prefix):]
            else:
                return text

        observers = []
        last_header = ''
        for name, observable in self._observables.items():
            observer_name = remove_prefix(name,
                                          self._strip_observer_name_prefix)
            header = observer_name.split('.')[0]
            observer_name = remove_prefix(observer_name, f'{header}.')
            # add collapsible_header
            if last_header != header:
                last_header = header
                observers.append(
                    self._prop_builder.collapsible_header(header, False))

            observers.append(
                self._prop_builder.checkbox(
                    observer_name,
                    self._observer_system.is_observer_registered(name),
                    partial(enable_observers_handler, name)))

        def set_all():
            self._register_observers()
            self._persist()

        def set_none():
            self._unregister_observers()
            self._persist()

        return [
            self._prop_builder.button('All', set_all),
            self._prop_builder.button('None', set_none),
        ] + observers
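
# Naming sketch (hedged, illustrative observer name): with
# strip_observer_name_prefix='Model.', an observable registered as
# 'Model.gate.SP cluster centers' ends up under the collapsible header 'gate'
# as a checkbox labelled 'SP cluster centers'; toggling the checkbox
# registers/unregisters that observer in the observer system.
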
class SpatialPoolerParamsProps(Initializable, ABC):
    _flock: SPFlock
    _prop_builder: ObserverPropertiesBuilder
    _params: SpatialPoolerParams

    def __init__(self, params: SpatialPoolerParams, flock: SPFlock):
        self._flock = flock
        self._params = params
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

    def is_initialized(self) -> bool:
        return self._flock is not None

    @property
    def input_size(self) -> int:
        return self._params.input_size

    @property
    def buffer_size(self) -> int:
        return self._params.buffer_size

    @buffer_size.setter
    def buffer_size(self, value: int):
        validate_positive_int(value)
        if value < self.batch_size:
            raise FailedValidationException('buffer_size must be greater than or equal to batch_size')
        self._params.buffer_size = value

    @property
    def batch_size(self) -> int:
        return self._params.batch_size

    @batch_size.setter
    def batch_size(self, value: int):
        validate_positive_int(value)
        if value > self.buffer_size:
            raise FailedValidationException('batch_size must be less than or equal to buffer_size')
        self._params.batch_size = value

    @property
    def learning_rate(self) -> float:
        return self._params.learning_rate

    @learning_rate.setter
    def learning_rate(self, value: float):
        validate_positive_float(value)
        self._params.learning_rate = value
        if self._flock is not None:
            self._flock.learning_rate = value

    @property
    def cluster_boost_threshold(self) -> int:
        return self._params.cluster_boost_threshold

    @cluster_boost_threshold.setter
    def cluster_boost_threshold(self, value: int):
        validate_positive_int(value)
        self._params.cluster_boost_threshold = value

    @property
    def max_boost_time(self) -> int:
        return self._params.max_boost_time

    @max_boost_time.setter
    def max_boost_time(self, value: int):
        validate_positive_int(value)
        self._params.max_boost_time = value

    @property
    def learning_period(self) -> int:
        return self._params.learning_period

    @learning_period.setter
    def learning_period(self, value: int):
        validate_positive_int(value)
        self._params.learning_period = value

    @property
    def enable_learning(self) -> bool:
        return self._params.enable_learning

    @enable_learning.setter
    def enable_learning(self, value: bool):
        self._params.enable_learning = value
        if self._flock is not None:
            self._flock.enable_learning = value

    def reset_cluster_centers(self):
        self._flock.initialize_cluster_centers()

    @property
    def boost(self) -> bool:
        return self._params.boost

    @boost.setter
    def boost(self, value: bool):
        self._params.boost = value

    @property
    def sampling_method(self) -> SamplingMethod:
        return self._params.sampling_method

    @sampling_method.setter
    def sampling_method(self, value: SamplingMethod):
        self._params.sampling_method = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.auto('SP_input_size', type(self).input_size, edit_strategy=disable_on_runtime,
                                    hint='Size of input vector for one expert'),
            self._prop_builder.auto('SP_buffer_size', type(self).buffer_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the SP buffer - how many last entries (steps) are stored'),
            self._prop_builder.auto('SP_batch_size', type(self).batch_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the SP batch - it is sampled from the buffer'),
            self._prop_builder.auto('SP_learning_rate', type(self).learning_rate,
                                    hint='How much of a distance between the current position of the cluster center '
                                         'and its target position is removed in one learning process run'),
            self._prop_builder.auto('SP_enable_learning', type(self).enable_learning, hint='SP learning is enabled'),
            self._prop_builder.button('SP_reset_cluster_centers', self.reset_cluster_centers),
            #
            self._prop_builder.auto('SP_cluster_boost_threshold', type(self).cluster_boost_threshold,
                                    edit_strategy=disable_on_runtime,
                                    hint='If the cluster is without any datapoint for this many consecutive steps, '
                                         'the boosting starts'),
            self._prop_builder.auto('SP_max_boost_time', type(self).max_boost_time, edit_strategy=disable_on_runtime,
                                    hint='If any cluster is boosted for this many steps, the boosting targets are '
                                         'recomputed'),
            self._prop_builder.auto('SP_learning_period', type(self).learning_period, edit_strategy=disable_on_runtime,
                                    hint='How often the learning process runs - every Xth run of the SP forward process'),
            self._prop_builder.auto('SP_boost', type(self).boost, edit_strategy=disable_on_runtime,
                                    hint='If false, the SP will not boost clusters which have no datapoints'),
            self._prop_builder.auto('SP_sampling_method', type(self).sampling_method, edit_strategy=disable_on_runtime,
                                    hint='<ul>'
                                         '<li>LAST_N - take last n entries from the buffer</li>'
                                         '<li>UNIFORM - sample uniformly from the whole buffer</li>'
                                         '<li>BALANCED - sample from the whole buffer so that the counts of points '
                                         'belonging to each cluster are approximately equal</li>'
                                         '</ul>'),
        ]
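
# Note on the builder calls above (an observation about this code, not a new
# API): `type(self).input_size` etc. hand ObserverPropertiesBuilder.auto the
# property descriptor itself rather than its current value, which presumably
# lets the builder both read and write the property on this instance through
# the descriptor's fget/fset.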