Example #1
class ClusterProjectionsGroupProperties:
    _prop_builder: ObserverPropertiesBuilder
    _is_rgb: bool = False
    tensor_view_projection: TensorViewProjection

    def __init__(self):
        self._prop_builder = ObserverPropertiesBuilder(self)
        self.tensor_view_projection = TensorViewProjection(is_buffer=False)

    def project_and_scale(self, tensor: torch.Tensor):
        tensor, projection_params = self.tensor_view_projection.transform_tensor(
            tensor, self.is_rgb)
        return tensor, projection_params

    @property
    def is_rgb(self) -> bool:
        return self._is_rgb

    @is_rgb.setter
    def is_rgb(self, value: bool):
        self._is_rgb = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        properties = [self._prop_builder.auto("RGB",
                                              type(self).is_rgb)
                      ] + self.tensor_view_projection.get_properties()

        header_name = 'Projections'
        for prop in properties:
            prop.name = f"{header_name}.{prop.name}"

        return [
            self._prop_builder.collapsible_header(header_name, True),
            *properties
        ]
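The prefixing loop above only requires each ObserverPropertiesItem to expose a mutable name. A minimal, self-contained sketch of the same grouping idea, using a hypothetical PropItem stand-in rather than the real ObserverPropertiesItem:

from dataclasses import dataclass
from typing import List


@dataclass
class PropItem:  # hypothetical stand-in for ObserverPropertiesItem
    name: str
    value: object


def group_under_header(header: str, items: List[PropItem]) -> List[PropItem]:
    # Prefix each item with the header name so the UI nests it under the header.
    for item in items:
        item.name = f'{header}.{item.name}'
    return [PropItem(header, 'collapsible_header'), *items]


grouped = group_under_header('Projections', [PropItem('RGB', False)])
assert grouped[1].name == 'Projections.RGB'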
Example #2
 def __init__(self,
              name: str,
              observer_system: ObserverSystem,
              strip_observer_name_prefix: str = ''):
     self._strip_observer_name_prefix = strip_observer_name_prefix
     self.name = name
     self._observer_system = observer_system
     self._observables = {}
     observer_system.signals.window_closed.connect(self.on_window_closed)
     self._prop_builder = ObserverPropertiesBuilder(self)
Example #3
 def __init__(self, is_buffer: bool):
     self._real_shape = []
     self._shape = []
     self._items_per_row = 1
     self._min = 0
     self._max = 1
     self._logger = logging.getLogger(
         f"{__name__}.Observer.{type(self).__name__}")
     self._is_buffer = is_buffer
     self._sum_dim = None
     self._prop_builder = ObserverPropertiesBuilder(self)
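The logger name built here follows the logging module's standard dotted hierarchy, so levels and handlers configured on a parent logger propagate to every observer; a quick standalone illustration:

import logging


class BufferObserver:
    def __init__(self):
        # A child of '<module>.Observer': configuration set on the parent applies here.
        self._logger = logging.getLogger(f"{__name__}.Observer.{type(self).__name__}")


logging.basicConfig(level=logging.DEBUG)
logging.getLogger(f"{__name__}.Observer").setLevel(logging.WARNING)
BufferObserver()._logger.debug("suppressed by the WARNING level set on the parent")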
Example #4
class ExpertParamsProps(Initializable, ABC):
    _unit: 'ExpertFlockUnit'
    _prop_builder: ObserverPropertiesBuilder
    _params: ExpertParams

    def __init__(self, params: ExpertParams, unit: 'ExpertFlockUnit'):
        self._unit = unit
        self._params = params
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

    def is_initialized(self) -> bool:
        return self._unit is not None

    @property
    def flock_size(self) -> int:
        return self._params.flock_size

    @flock_size.setter
    def flock_size(self, value: int):
        validate_positive_int(value)
        self._params.flock_size = value

    @property
    def n_cluster_centers(self) -> int:
        return self._params.n_cluster_centers

    @n_cluster_centers.setter
    def n_cluster_centers(self, value: int):
        validate_positive_int(value)
        self._params.n_cluster_centers = value

    @property
    def compute_reconstruction(self) -> bool:
        return self._params.compute_reconstruction

    @compute_reconstruction.setter
    def compute_reconstruction(self, value: bool):
        self._params.compute_reconstruction = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.auto('flock_size', type(self).flock_size, edit_strategy=disable_on_runtime),
            self._prop_builder.auto('n_cluster_centers', type(self).n_cluster_centers,
                                    edit_strategy=disable_on_runtime),
            self._prop_builder.auto('compute_reconstruction', type(self).compute_reconstruction,
                                    edit_strategy=disable_on_runtime),
        ]
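Note that get_properties passes type(self).flock_size (the property descriptor itself, not the current value), which is what lets the builder read the getter's type annotation and invoke the setter later. A minimal sketch of that mechanism; the builder internals shown here are an assumption, not the real implementation:

class Data:
    _x: int = 0

    @property
    def x(self) -> int:
        return self._x

    @x.setter
    def x(self, value: int):
        self._x = value


def inspect_and_set(instance, prop: property, raw: str):
    # The getter's return annotation tells us how to parse the incoming string.
    value_type = prop.fget.__annotations__['return']
    prop.fset(instance, value_type(raw))


d = Data()
inspect_and_set(d, type(d).x, '42')
assert d.x == 42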
Example #5
    def __init__(self, node: HierarchicalObservableNode, expert_no: int):
        super().__init__()
        self._node = node
        self._expert_no = expert_no

        self._properties = {}

        self._grouped_projections = None
        self.prop_builder = ObserverPropertiesBuilder()

        # TODO HACK - persisted values are loaded before the node unit is initialized, and it is the unit
        # initialization that determines the number of groups. The properties are not initialized yet at that
        # point, so create dummy properties just to keep persistence working.
        self._default_properties = {
            i: HierarchicalGroupProperties(i, self)
            for i in range(self._groups_max_count)
        }
Example #6
    def __init__(self, tensor_provider: TensorProvider):
        self._has_temporal_pooler = tensor_provider.has_temporal_pooler()

        self._n_cluster_centers = tensor_provider.n_cluster_centers()
        self._n_sequences = tensor_provider.n_sequences()
        self._sequence_length = tensor_provider.sequence_length()

        self.cluster_centers = ClusterCentersDataBuilder(tensor_provider)
        self.fdsim = FDsimDataBuilder(tensor_provider)
        self.n_dims = 2
        self.pca = PcaDataBuilder(tensor_provider)
        self.spring_lines = SpringLinesBuilder(tensor_provider)
        self.spline_arrows = SplineArrowsBuilder(tensor_provider)
        self._prop_builder = ObserverPropertiesBuilder()
        self._sequences_builder = SequencesBuilder(tensor_provider)
        self._show_cluster_centers = True
        self._show_cluster_datapoints = True
        self._show_spring_lines = self._has_temporal_pooler
        self._show_spline_arrows = self._has_temporal_pooler
        self._projection_type = ClusterObserverProjection.PCA
Example #7
    def __init__(self,
                 name: str = None,
                 inputs: TInputs = None,
              memory_blocks: TInternals = None,
                 outputs: TOutputs = None):
        """Initializes the node.

        Inputs, memory_blocks (== internals) and outputs should be initialized here and accessible from now on
        for connecting.
        """
        # TODO (Feat): Auto-name nodes as in BrainSimulator, or remove the default value of parameter 'name'.
        self._name = name
        self.topological_order = -1
        self._id = 0
        self._skip = False

        self.inputs = inputs if inputs is not None else EmptyInputs(self)
        self.memory_blocks = memory_blocks if memory_blocks is not None else EmptyOutputs(
            self)
        self.outputs = outputs if outputs is not None else EmptyOutputs(self)
        self._prop_builder = ObserverPropertiesBuilder(
            self, source_type=ObserverPropertiesItemSourceType.MODEL)
        self._single_step_scoped_cache = SimpleResettableCache()
Example #8
 def test_resolve_state_edit_strategy(self, initializable, state, strategy, exp_state, exp_description):
     builder = ObserverPropertiesBuilder(DataIsInitializable(initializable))
     res_state, res_description = builder._resolve_state_strategy(state, strategy)
     assert exp_state == res_state
     assert exp_description == res_description
Example #9
class TemporalPoolerParamsProps(Initializable, ABC):
    _flock: TPFlock
    _prop_builder: ObserverPropertiesBuilder
    _params: TemporalPoolerParams

    def __init__(self, params: TemporalPoolerParams, flock: TPFlock):
        self._flock = flock
        self._params = params
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

    def is_initialized(self) -> bool:
        return self._flock is not None

    @property
    def own_rewards_weight(self) -> float:
        return self._params.own_rewards_weight

    @own_rewards_weight.setter
    def own_rewards_weight(self, value: float):
        validate_positive_with_zero_float(value)
        self._params.own_rewards_weight = value

    @property
    def incoming_context_size(self) -> int:
        return self._params.incoming_context_size

    @incoming_context_size.setter
    def incoming_context_size(self, value: int):
        validate_positive_with_zero_int(value)
        self._params.incoming_context_size = value

    @property
    def buffer_size(self) -> int:
        return self._params.buffer_size

    @buffer_size.setter
    def buffer_size(self, value: int):
        validate_positive_int(value)
        if value < self.batch_size:
            raise FailedValidationException('buffer_size must be greater than or equal to batch_size')
        self._params.buffer_size = value

    @property
    def batch_size(self) -> int:
        return self._params.batch_size

    @batch_size.setter
    def batch_size(self, value: int):
        validate_positive_int(value)
        if value > self.buffer_size:
            raise FailedValidationException('batch_size must be less than or equal to buffer_size')
        self._params.batch_size = value

    @property
    def learning_period(self) -> int:
        return self._params.learning_period

    @learning_period.setter
    def learning_period(self, value: int):
        validate_positive_int(value)
        self._params.learning_period = value
        if self._flock is not None:
            self._flock.learning_period = value

    @property
    def enable_learning(self) -> bool:
        return self._params.enable_learning

    @enable_learning.setter
    def enable_learning(self, value: bool):
        self._params.enable_learning = value
        if self._flock is not None:
            self._flock.enable_learning = value

    @property
    def seq_length(self) -> int:
        return self._params.seq_length

    @seq_length.setter
    def seq_length(self, value: int):
        validate_positive_int(value)
        self._params.seq_length = value

    @property
    def seq_lookahead(self) -> int:
        return self._params.seq_lookahead

    @seq_lookahead.setter
    def seq_lookahead(self, value: int):
        validate_positive_int(value)
        self._params.seq_lookahead = value

    @property
    def n_frequent_seqs(self) -> int:
        return self._params.n_frequent_seqs

    @n_frequent_seqs.setter
    def n_frequent_seqs(self, value: int):
        validate_positive_int(value)
        self._params.n_frequent_seqs = value

    @property
    def max_encountered_seqs(self) -> int:
        return self._params.max_encountered_seqs

    @max_encountered_seqs.setter
    def max_encountered_seqs(self, value: int):
        validate_positive_int(value)
        self._params.max_encountered_seqs = value

    @property
    def forgetting_limit(self) -> int:
        return self._params.forgetting_limit

    @forgetting_limit.setter
    def forgetting_limit(self, value: int):
        validate_positive_int(value)
        if self.is_initialized():
            self._flock.forgetting_limit = value
        self._params.forgetting_limit = value

    @property
    def context_prior(self) -> float:
        return self._params.context_prior

    @context_prior.setter
    def context_prior(self, value: float):
        validate_positive_float(value)
        self._params.context_prior = value

    @property
    def exploration_attempts_prior(self) -> float:
        return self._params.exploration_attempts_prior

    @exploration_attempts_prior.setter
    def exploration_attempts_prior(self, value: float):
        validate_positive_float(value)
        self._params.exploration_attempts_prior = value

    @property
    def exploration_probability(self) -> float:
        return self._params.exploration_probability

    @exploration_probability.setter
    def exploration_probability(self, value: float):
        validate_positive_with_zero_float(value)
        self._params.exploration_probability = value
        if self.is_initialized():
            self._flock.exploration_probability = value

    @property
    def output_projection_persistence(self) -> float:
        return self._params.output_projection_persistence

    @output_projection_persistence.setter
    def output_projection_persistence(self, value: float):
        validate_float_in_range(value, 0, 1)
        self._params.output_projection_persistence = value

    @property
    def follow_goals(self) -> bool:
        return self._params.follow_goals

    @follow_goals.setter
    def follow_goals(self, value: bool):
        self._params.follow_goals = value
        if self.is_initialized():
            self._flock.follow_goals = value

    def reset_learnt_sequences(self):
        self._flock.reset_learnt_sequences()

    @property
    def compute_backward_pass(self) -> bool:
        return self._params.compute_backward_pass

    @compute_backward_pass.setter
    def compute_backward_pass(self, value: bool):
        self._params.compute_backward_pass = value
        if self.is_initialized():
            self._flock.compute_backward_pass = value

    @property
    def compute_best_matching_context(self) -> bool:
        return self._params.compute_best_matching_context

    @compute_best_matching_context.setter
    def compute_best_matching_context(self, value: bool):
        self._params.compute_best_matching_context = value
        if self.is_initialized():
            self._flock.compute_best_matching_context = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.button('TP_reset_learnt_sequences', self.reset_learnt_sequences),
            self._prop_builder.auto('TP_incoming_context_size', type(self).incoming_context_size,
                                    edit_strategy=disable_on_runtime,
                                    hint='Size of the context input without the two elements for reward'),
            self._prop_builder.auto('TP_buffer_size', type(self).buffer_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the TP buffer - how many consecutive steps are stored'),
            self._prop_builder.auto('TP_batch_size', type(self).batch_size, edit_strategy=disable_on_runtime,
                                    hint="How large is the batch 'sampled' from the buffer - in the case of TP "
                                         "the batch always contains last X entries"),
            self._prop_builder.auto('TP_learning_period', type(self).learning_period,
                                    hint='How often does the learning of TP run (every Xth step of the TP)'),
            self._prop_builder.auto('TP_enable_learning', type(self).enable_learning, hint='TP learning is enabled'),
            self._prop_builder.auto('TP_seq_length', type(self).seq_length, edit_strategy=disable_on_runtime,
                                    hint='Length of the sequences considered in the TP; it equals lookbehind + lookahead'),
            self._prop_builder.auto('TP_seq_lookahead', type(self).seq_lookahead, edit_strategy=disable_on_runtime,
                                    hint='How large a part of the sequence is lookahead (the rest is lookbehind, '
                                         'including the current cluster)'),
            self._prop_builder.auto('TP_n_frequent_seqs', type(self).n_frequent_seqs, edit_strategy=disable_on_runtime,
                                    hint='How many of the sequences from max_encountered_seqs are used in the forward '
                                         'and backward processes. Only X most frequent ones.'),
            self._prop_builder.auto('TP_max_encountered_seqs', type(self).max_encountered_seqs,
                                    edit_strategy=disable_on_runtime,
                                    hint='How many sequences does the TP know. Their statistics are updated during '
                                         'learning. If TP encounters more sequences, it forgets the least frequent ones.'),
            self._prop_builder.auto('TP_forgetting_limit', type(self).forgetting_limit,
                                    hint='Value influencing how fast the old knowledge in TP is replaced by new '
                                         'knowledge. When adding new knowledge, it compresses old knowledge into X steps. '
                                         'This corresponds to exponential decay with factor 1/X.'),
            self._prop_builder.auto('TP_context_prior', type(self).context_prior, edit_strategy=disable_on_runtime,
                                    hint='What is the prior probability of seeing any new sequence in any context. '
                                         'This eliminates too extreme judgments based on only a few data points. '
                                         'It should not normally be changed.'),
            self._prop_builder.auto('TP_exploration_attempts_prior', type(self).exploration_attempts_prior,
                                    edit_strategy=disable_on_runtime,
                                    hint='Similar to the context_prior, but for exploration.'),
            self._prop_builder.auto('TP_exploration_probability', type(self).exploration_probability,
                                    hint='With this probability, the expert will be exploring instead of trying to '
                                         'fulfill goals.'),
            self._prop_builder.auto('TP_follow_goals', type(self).follow_goals,
                                    hint='Should the expert fulfill the goals rather than just doing what it '
                                         'predicts will happen (i.e. actively trying to fulfill what the passive model '
                                         'predicts). True means that it tries to fulfill the goals.'),

            self._prop_builder.auto('TP_output_projection_persistence', type(self).output_projection_persistence,
                                    hint='This decays output_projection values in time (less event-driven behavior). '
                                         'Multiply output_projection by this number, compute new values of '
                                         'output_projection for experts that changed their inputs, '
                                         'and set their values in the output_projection.'),

            self._prop_builder.auto("TP_own_rewards_weight", type(self).own_rewards_weight),
            self._prop_builder.auto('TP_compute_backward_pass', type(self).compute_backward_pass,
                                    hint='Should the active inference (goal-directed behavior, actions) be computed. '
                                         'If not needed, disabling this can speed up the computation'),
            self._prop_builder.auto('TP_compute_best_matching_context', type(self).compute_best_matching_context,
                                    hint='When set to true, internal predicted_clusters_by_context and output best_matching_context are computed'),
        ]
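The buffer_size and batch_size setters above validate against each other, so the pair can never be left inconsistent regardless of which field is edited first. A self-contained sketch of the pattern, with a stand-in exception class:

class FailedValidationException(Exception):  # stand-in for the project's exception class
    pass


class TpSizeParams:
    def __init__(self, buffer_size: int = 1000, batch_size: int = 300):
        self._buffer_size = buffer_size
        self._batch_size = batch_size

    @property
    def buffer_size(self) -> int:
        return self._buffer_size

    @buffer_size.setter
    def buffer_size(self, value: int):
        if value < self._batch_size:
            raise FailedValidationException('buffer_size must be greater than or equal to batch_size')
        self._buffer_size = value

    @property
    def batch_size(self) -> int:
        return self._batch_size

    @batch_size.setter
    def batch_size(self, value: int):
        if value > self._buffer_size:
            raise FailedValidationException('batch_size must be less than or equal to buffer_size')
        self._batch_size = value


params = TpSizeParams()
params.batch_size = 500        # fine: 500 <= 1000
try:
    params.buffer_size = 100   # rejected: would drop below batch_size
except FailedValidationException:
    pass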
Example #10
class ClusterObserver(ClusterObservable):
    _sequences_builder: SequencesBuilder
    _show_cluster_centers: bool
    _show_cluster_datapoints: bool
    _show_spring_lines: bool
    _show_spline_arrows: bool
    _projection_type: ClusterObserverProjection
    _prop_builder: ObserverPropertiesBuilder
    _n_cluster_centers: int
    _n_sequences: int
    _sequence_length: int

    _width: int = 640
    _height: int = 480
    _has_temporal_pooler: bool

    def __init__(self, tensor_provider: TensorProvider):
        self._has_temporal_pooler = tensor_provider.has_temporal_pooler()

        self._n_cluster_centers = tensor_provider.n_cluster_centers()
        self._n_sequences = tensor_provider.n_sequences()
        self._sequence_length = tensor_provider.sequence_length()

        self.cluster_centers = ClusterCentersDataBuilder(tensor_provider)
        self.fdsim = FDsimDataBuilder(tensor_provider)
        self.n_dims = 2
        self.pca = PcaDataBuilder(tensor_provider)
        self.spring_lines = SpringLinesBuilder(tensor_provider)
        self.spline_arrows = SplineArrowsBuilder(tensor_provider)
        self._prop_builder = ObserverPropertiesBuilder()
        self._sequences_builder = SequencesBuilder(tensor_provider)
        self._show_cluster_centers = True
        self._show_cluster_datapoints = True
        self._show_spring_lines = self._has_temporal_pooler
        self._show_spline_arrows = self._has_temporal_pooler
        self._projection_type = ClusterObserverProjection.PCA
        # self._pca_transformer = PcaTransformer()

    def get_data(self) -> ClusterObserverData:
        # if self._projection_type == ClusterObserverProjection.PCA:
        #     self.pca.update_pca_transformer(self._pca_transformer)

        return ClusterObserverData(
            cluster_centers=self.cluster_centers.get_data()
            if self._show_cluster_centers else None,
            fdsim=self.fdsim.get_data(),
            n_dims=self.n_dims,
            n_cluster_centers=self._n_cluster_centers,
            n_sequences=self._n_sequences,
            sequence_length=self._sequence_length,
            pca=self.pca.get_data(self.n_dims, self._show_cluster_datapoints)
            if self._projection_type == ClusterObserverProjection.PCA else
            None,
            projection_type="PCA" if self._projection_type
            == ClusterObserverProjection.PCA else "FDsim",
            width=self._width,
            height=self._height,
            spring_lines=self.spring_lines.get_data()
            if self._show_spring_lines else None,
            sequences=self._sequences_builder.get_data(),
            spline_arrows=self.spline_arrows.get_data()
            if self._show_spline_arrows else None,
        )

    def get_properties(self) -> List[ObserverPropertiesItem]:
        def update_projection_dim(value):
            if int(value) == 0:
                self.n_dims = 2
            else:
                self.n_dims = 3
            return value

        def update_show_cluster_centers(value: bool) -> bool:
            self._show_cluster_centers = value
            return value

        def update_show_cluster_datapoints(value: bool) -> bool:
            self._show_cluster_datapoints = value
            return value

        def update_show_spring_lines(value: bool) -> bool:
            self._show_spring_lines = value
            return value

        def update_show_spline_arrows(value: bool) -> bool:
            self._show_spline_arrows = value
            return value

        def format_projection_type(value: ClusterObserverProjection) -> int:
            if value == ClusterObserverProjection.PCA:
                return 0
            elif value == ClusterObserverProjection.FD_SIM:
                return 1
            else:
                raise IllegalArgumentException(
                    f'Unrecognized projection {value}')

        def update_projection_type(value):
            old_type = self._projection_type
            if int(value) == 0:
                self._projection_type = ClusterObserverProjection.PCA
            elif int(value) == 1:
                self._projection_type = ClusterObserverProjection.FD_SIM
            else:
                raise IllegalArgumentException(
                    f'Unrecognized projection {value}')

            if self._projection_type == ClusterObserverProjection.PCA and old_type != ClusterObserverProjection.PCA:
                self.pca.reset()

            return value

        def reset_projection(value):
            if self._projection_type == ClusterObserverProjection.PCA:
                self.pca.reset()
            elif self._projection_type == ClusterObserverProjection.FD_SIM:
                self.fdsim.reset()
            else:
                raise IllegalArgumentException(
                    f'Unrecognized projection {value}')

        def update_width(value):
            self._width = int(value)
            return value

        def update_height(value):
            self._height = int(value)
            return value

        def yield_props():
            yield ObserverPropertiesItem(
                'Projection',
                'select',
                format_projection_type(self._projection_type),
                update_projection_type,
                select_values=[
                    ObserverPropertiesItemSelectValueItem('PCA'),
                    ObserverPropertiesItemSelectValueItem('Force simulation')
                ],
                state=ObserverPropertiesItemState.ENABLED
                if self._has_temporal_pooler else
                ObserverPropertiesItemState.READ_ONLY)

            yield ObserverPropertiesItem(
                'Projection dimensionality',
                'select',
                0 if self.n_dims == 2 else 1,
                update_projection_dim,
                select_values=[
                    ObserverPropertiesItemSelectValueItem('2D'),
                    ObserverPropertiesItemSelectValueItem('3D')
                ])

            yield ObserverPropertiesItem('Reset Projection', 'button', "Reset",
                                         reset_projection)

            # Enablers
            yield self._prop_builder.checkbox('Show Cluster Centers',
                                              self._show_cluster_centers,
                                              update_show_cluster_centers)
            yield self._prop_builder.checkbox(
                'Show Cluster Datapoints',
                self._show_cluster_datapoints if self._projection_type
                == ClusterObserverProjection.PCA else False,
                update_show_cluster_datapoints,
                state=ObserverPropertiesItemState.ENABLED
                if self._projection_type == ClusterObserverProjection.PCA else
                ObserverPropertiesItemState.DISABLED)
            yield self._prop_builder.checkbox(
                'Show Spring Lines',
                self._show_spring_lines
                if self._has_temporal_pooler else False,
                update_show_spring_lines,
                state=ObserverPropertiesItemState.ENABLED
                if self._has_temporal_pooler else
                ObserverPropertiesItemState.DISABLED)
            yield self._prop_builder.checkbox(
                'Show Spline Arrows',
                self._show_spline_arrows
                if self._has_temporal_pooler else False,
                update_show_spline_arrows,
                state=ObserverPropertiesItemState.ENABLED
                if self._has_temporal_pooler else
                ObserverPropertiesItemState.DISABLED)

            # Cluster Centers
            yield self._prop_builder.collapsible_header(
                'Cluster Centers', default_is_expanded=True)
            yield from self.cluster_centers.get_properties(
                enabled=self._show_cluster_centers)

            # Spline Arrows
            yield self._prop_builder.collapsible_header(
                'Spline Arrows', default_is_expanded=True)
            yield from self.spline_arrows.get_properties(
                enabled=self._show_spline_arrows)

            # Canvas
            yield self._prop_builder.collapsible_header(
                'Canvas', default_is_expanded=True)
            yield ObserverPropertiesItem('Width', 'number', self._width,
                                         update_width)
            yield ObserverPropertiesItem('Height', 'number', self._height,
                                         update_height)

            # Force Simulation
            if self._has_temporal_pooler:
                yield ObserverPropertiesItem('Force simulation',
                                             'collapsible_header', True,
                                             lambda _: "True")
                yield from self.fdsim.get_properties()

        return list(yield_props())
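format_projection_type and update_projection_type translate between the enum and the select widget's option indices by hand; the same round trip can be written generically. A sketch, where Projection is a local stand-in for ClusterObserverProjection:

from enum import Enum


class Projection(Enum):
    PCA = 0
    FD_SIM = 1


def to_select_index(value: Projection) -> int:
    # Select widgets speak in option indices; map the enum onto them explicitly.
    return list(Projection).index(value)


def from_select_index(index) -> Projection:
    return list(Projection)[int(index)]


assert from_select_index(to_select_index(Projection.FD_SIM)) is Projection.FD_SIM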
Example #11
    def __init__(self, params: GradualLearningBasicTopologyParams = GradualLearningBasicTopologyParams()):
        super().__init__('cuda')
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

        self._params = params
        self.create_topology()
Example #12
class HierarchicalObserver(Observable):
    # used to hack persistence
    _groups_max_count: int = 10
    _default_properties: Dict[int, HierarchicalGroupProperties]

    _properties: Dict[int, HierarchicalGroupProperties]
    _grouped_projections: List[List[torch.Tensor]]
    prop_builder: ObserverPropertiesBuilder
    _items_per_row: int = 1
    _groups_stacking: HierarchicalObservableGroupsStacking = HierarchicalObservableGroupsStacking.HORIZONTAL
    # minimum observer size in pixels, used for automatic rescaling of observers that are too small
    minimal_group_size: int = 10

    def __init__(self, node: HierarchicalObservableNode, expert_no: int):
        super().__init__()
        self._node = node
        self._expert_no = expert_no

        self._properties = {}

        self._grouped_projections = None
        self.prop_builder = ObserverPropertiesBuilder()

        # TODO HACK - persisted values are loaded before the node unit is initialized, and it is the unit
        # initialization that determines the number of groups. The properties are not initialized yet at that
        # point, so create dummy properties just to keep persistence working.
        self._default_properties = {
            i: HierarchicalGroupProperties(i, self)
            for i in range(self._groups_max_count)
        }

    def get_data(self) -> HierarchicalObservableData:
        self._grouped_projections = grouped_projections = get_inverse_projections_for_all_clusters(
            self._node, self._expert_no)

        for i in range(len(grouped_projections)):
            if i not in self._properties:
                if i < len(self._default_properties):
                    # New group - load default properties (with loaded data from persistence storage)
                    self._properties[i] = self._default_properties[i]
                else:
                    logger.warning(
                        f'Hierarchical observer {self._node.name_with_id}.expert_{self._expert_no}: Too '
                        f'many groups found, values will not be persisted. Increase self._groups_max_count.'
                    )
                    self._properties[i] = HierarchicalGroupProperties(i, self)

        image_groups = []
        params_groups = []
        for i, projection_group in enumerate(grouped_projections):
            group_properties = self._properties[i]

            group_images = []
            group_projection_params = None
            for projection in projection_group:
                tensor, projection_params = group_properties.project_and_scale(
                    projection)
                group_images.append(tensor)

                # These are not appended, they are all the same.
                group_projection_params = projection_params

            image_groups.append(group_images)
            params_groups.append(
                HierarchicalObservableParams(
                    scale=group_properties.scale,
                    projection=group_projection_params))

        return HierarchicalObservableData(self._groups_stacking,
                                          self._items_per_row, image_groups,
                                          params_groups)

    def get_properties(self) -> List[ObserverPropertiesItem]:
        def update_items_per_row(value: int):
            self._items_per_row = value

        def update_minimal_group_size(value: int):
            self.minimal_group_size = value

        def update_groups_stacking(value):
            self._groups_stacking = value

        properties = [
            self.prop_builder.collapsible_header('Global', True),
            self.prop_builder.select('Global.Groups stacking',
                                     self._groups_stacking,
                                     update_groups_stacking,
                                     HierarchicalObservableGroupsStacking),
            self.prop_builder.number_int('Global.Items per row',
                                         self._items_per_row,
                                         update_items_per_row),
            self.prop_builder.number_int('Global.Minimal size',
                                         self.minimal_group_size,
                                         update_minimal_group_size)
        ]

        if len(self._properties) == 0:
            # Hack for property persistence - this branch is visited when the observer system is initialized
            # and persisted values are loaded into the properties - the placeholder properties are needed
            for group_id in self._default_properties:
                properties.extend(
                    self._default_properties[group_id].get_properties())
        else:
            for group_id in self._properties:
                properties.extend(self._properties[group_id].get_properties())

        return properties

    def request_callback(self, request_data: RequestData):
        data = request_data.data
        x = int(data['x'])
        y = int(data['y'])
        group_idx = int(data['group_idx'])
        image_idx = int(data['image_idx'])

        lookup_not_possible = (self._grouped_projections is None
                               or len(self._grouped_projections) < group_idx + 1
                               or group_idx not in self._properties
                               or self._properties[group_idx].is_rgb
                               or len(self._grouped_projections[group_idx]) < image_idx + 1)

        if lookup_not_possible:
            value = float('nan')
        else:
            value = self._properties[
                group_idx].tensor_view_projection.value_at(
                    self._grouped_projections[group_idx][image_idx], x, y)

        return {"value": 'NaN' if math.isnan(value) else value}

    def get_callbacks(self) -> ObserverCallbacks:
        return ObserverCallbacks().add_request(self.request_callback)
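request_callback returns the string 'NaN' rather than the float because NaN is not valid JSON and strict encoders reject it:

import json
import math

value = float('nan')
json.dumps({"value": 'NaN' if math.isnan(value) else value})  # ok: a plain string
try:
    json.dumps({"value": value}, allow_nan=False)
except ValueError:
    pass  # a bare NaN float is rejected by a strict encoder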
Example #13
class ObserverView(PropertiesObservable):
    """A node that encompasses all the model's observables and passes them on to the observer system."""
    _strip_observer_name_prefix: str

    _observables: Dict[str, Observable]
    _first_show: bool = True

    def __init__(self,
                 name: str,
                 observer_system: ObserverSystem,
                 strip_observer_name_prefix: str = ''):
        self._strip_observer_name_prefix = strip_observer_name_prefix
        self.name = name
        self._observer_system = observer_system
        self._observables = {}
        observer_system.signals.window_closed.connect(self.on_window_closed)
        self._prop_builder = ObserverPropertiesBuilder(self)

    def _persist(self):
        self._observer_system.persist_observer_values(self.name, self)

    def on_window_closed(self, observer_name: str):
        if observer_name in self._observables:
            self._observer_system.unregister_observer(observer_name, False)
            self._persist()

    def close(self):
        self._unregister_observers()
        self._observer_system.unregister_observer(self.name, True)

    def set_observables(self, observables: Dict[str, Observable]):
        self._unregister_observers()
        self._observables = observables
        # default is no observers visible
        # self._register_observers()
        if self._first_show:
            self._observer_system.register_observer(self.name, self)
            self._first_show = False

    def _register_observers(self):
        for name, observable in self._observables.items():
            self._observer_system.register_observer(name, observable)

    def _unregister_observers(self):
        for name in self._observables.keys():
            self._observer_system.unregister_observer(name, True)

    def get_properties(self) -> List[ObserverPropertiesItem]:
        def enable_observers_handler(prop_name: str, value: bool):
            if value:
                logger.debug(f"Register observer {name}")
                self._observer_system.register_observer(
                    prop_name, self._observables[prop_name])
            else:
                logger.debug(f"Unregister observer {name}")
                self._observer_system.unregister_observer(prop_name, True)

        def remove_prefix(text: str, prefix: str):
            if text.startswith(prefix):
                return text[len(prefix):]
            else:
                return text

        observers = []
        last_header = ''
        for name, observable in self._observables.items():
            observer_name = remove_prefix(name,
                                          self._strip_observer_name_prefix)
            header = observer_name.split('.')[0]
            observer_name = remove_prefix(observer_name, f'{header}.')
            # add collapsible_header
            if last_header != header:
                last_header = header
                observers.append(
                    self._prop_builder.collapsible_header(header, False))

            observers.append(
                self._prop_builder.checkbox(
                    observer_name,
                    self._observer_system.is_observer_registered(name),
                    partial(enable_observers_handler, name)))

        def set_all():
            self._register_observers()
            self._persist()

        def set_none():
            self._unregister_observers()
            self._persist()

        return [
            self._prop_builder.button('All', set_all),
            self._prop_builder.button('None', set_none),
        ] + observers
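Binding name with functools.partial inside the loop matters: a plain closure would capture the loop variable by reference, and every checkbox would end up toggling the last observer. A standalone demonstration of the difference:

from functools import partial


def make_handlers_late_bound(names):
    return [lambda: name for name in names]  # all lambdas share one 'name' cell


def make_handlers_bound(names):
    return [partial(lambda n: n, name) for name in names]  # value bound per iteration


assert [h() for h in make_handlers_late_bound(['a', 'b'])] == ['b', 'b']
assert [h() for h in make_handlers_bound(['a', 'b'])] == ['a', 'b']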
Example #14
 def __init__(self, params: ExpertParams, unit: 'ExpertFlockUnit'):
     self._unit = unit
     self._params = params
     self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)
Example #15
 def __init__(self, params: SpatialPoolerParams, flock: SPFlock):
     self._flock = flock
     self._params = params
     self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)
Example #16
 def test_resolve_state_edit_strategy_exception(self):
     builder = ObserverPropertiesBuilder(DataIsNotInitializable())
     with raises(IllegalArgumentException,
                 match=r'Expected instance of .*Initializable.* but .*DataIsNotInitializable.* received.'):
         builder._resolve_state_strategy(ObserverPropertiesItemState.ENABLED, enable_on_runtime)
Example #17
class NodeBase(PropertiesProvider, ObservableProvider, Persistable, ABC,
               Generic[TInputs, TInternals, TOutputs], Initializable):
    """Defines a basic Node in the Graph.

    This should generally not be subclassed - look at worker_node_base or node_group modules instead.

    Usage:
    class ReallyCustomNode(NodeBase[SuchCustomInputs, MuchCustomInternals, VeryCustomOutputs]):
        ...
    """
    topological_order: int

    _name: str
    _id: int

    # All inputs.
    inputs: TInputs
    # inputs: InputsBase[TInputSlot]
    # Memory blocks which are normally not used as outputs.
    memory_blocks: TInternals
    # memory_blocks: OutputsBase[TOutputSlot]
    # Memory blocks which are normally used as outputs.
    outputs: TOutputs
    # outputs: OutputsBase[TOutputSlot]

    _skip: bool
    _prop_builder: ObserverPropertiesBuilder
    _single_step_scoped_cache: SimpleResettableCache

    @property
    def name(self):
        if self._name is None:
            self._name = "Node"

        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def name_with_id(self):
        return f"#{self._id} {self.name}"

    @property
    def id(self):
        return self._id

    @id.setter
    def id(self, value):
        self._id = value

    @property
    def skip_execution(self) -> bool:
        return self._skip

    @skip_execution.setter
    def skip_execution(self, value: bool):
        self._skip = value

    def __init__(self,
                 name: str = None,
                 inputs: TInputs = None,
                 memory_blocks: TInternals = None,
                 outputs: TOutputs = None):
        """Initializes the node.

        Inputs, memory_blocks (== internals) and outputs should be initialized here and accessible from now on
        for connecting.
        """
        # TODO (Feat): Auto-name nodes as in BrainSimulator, or remove the default value of parameter 'name'.
        self._name = name
        self.topological_order = -1
        self._id = 0
        self._skip = False

        self.inputs = inputs if inputs is not None else EmptyInputs(self)
        self.memory_blocks = memory_blocks if memory_blocks is not None else EmptyOutputs(
            self)
        self.outputs = outputs if outputs is not None else EmptyOutputs(self)
        self._prop_builder = ObserverPropertiesBuilder(
            self, source_type=ObserverPropertiesItemSourceType.MODEL)
        self._single_step_scoped_cache = SimpleResettableCache()

    def get_observables(self) -> Dict[str, Observable]:
        """Collect things that can be observed.

        Do not override this method, override _get_observables() instead!
        """
        observables = chain(self._get_memory_block_observables().items(),
                            self._get_observables().items())

        # Add prefix to observable names
        prefixed_observables: OrderedDict[str, Observable] = OrderedDict()
        for name, observer in observables:
            prefixed_observables[f"{self.name_with_id}.{name}"] = observer
        return prefixed_observables

    def _get_memory_block_observables(self) -> Dict[str, Observable]:
        def create_observers(result: Dict[str, Observable], prefix: str,
                             container: SlotContainerBase):
            for item in container:
                result[f'{prefix}.{item.name}'] = item.get_observable()

        observables: OrderedDict[str, Observable] = OrderedDict()
        create_observers(observables, 'Input', self.inputs)
        create_observers(observables, 'Internal', self.memory_blocks)
        create_observers(observables, 'Output', self.outputs)
        return observables

    def _get_observables(self) -> Dict[str, Observable]:
        """Get observables of the node.

        Override this method in subclasses to add custom Observables.

        Returns:
            Dict of name -> Observable.
        """
        return {}

    @abstractmethod
    def detect_dims_change(self) -> bool:
        """Checks whether the dimensions of slots have changed since last time this was called.

        Check the change of the inputs here (if something changed, recompute output dimensions).

        Returns:
            True if the sizes changed, False otherwise.
        """
        pass

    @abstractmethod
    def allocate_memory_blocks(self, tensor_creator: TensorCreator):
        """Prepares the unit before the simulation is run.

        This gets called multiple times during the dimension "shake-down" and then once before the simulation runs.
        """
        pass

    @abstractmethod
    def release_memory_blocks(self):
        """Revert the unit to the uninitialized state.

        Release the unit, and all memory blocks.
        """
        pass

    def validate(self):
        """Called after allocate_memory_blocks, before the first step runs.

        If a node cannot run in the current configuration of state/connected inputs/tensor dimensions, it should raise
        NodeValidationException (or a subclass).
        """
        pass

    def step(self):
        """Perform one node step unless self.skip_execution is True."""
        if not self.skip_execution:
            self._single_step_scoped_cache.reset()
            self._step()

    @abstractmethod
    def _step(self):
        """Perform the execution of the step.

        This should retrieve input tensors and pass them into unit.step().
        """

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.auto('Skip execution',
                                    type(self).skip_execution)
        ]

    def _get_persistence_name(self):
        return f'{self.name}_{self.id}'

    def save(self,
             parent_saver: Saver,
             persistence_name: Optional[str] = None):
        """Save the node in the context of its parent saver."""

        folder_name = persistence_name or self._get_persistence_name()
        saver = parent_saver.create_child(folder_name)

        self._save(saver)

    def load(self, parent_loader: Loader):
        """Load the node and its tensors from location relative to the parent loader."""
        folder_name = self._get_persistence_name()
        loader = parent_loader.load_child(folder_name)

        self._load(loader)

    def _save(self, saver: Saver):
        pass

    def _load(self, loader: Loader):
        pass
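get_observables merges two dicts and rewrites every key with the node's display name; the mechanics are plain itertools and OrderedDict, and a hypothetical prefix_keys helper shows them in isolation:

from collections import OrderedDict
from itertools import chain


def prefix_keys(prefix, *dicts):
    merged = OrderedDict()
    for name, value in chain(*(d.items() for d in dicts)):
        merged[f"{prefix}.{name}"] = value  # e.g. "#1 Node.Output.data"
    return merged


result = prefix_keys("#1 Node", {"Input.data": 1}, {"Output.data": 2})
assert list(result) == ["#1 Node.Input.data", "#1 Node.Output.data"]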
Example #18
class TestObserverPropertiesBuilder(ABC):
    builder: ObserverPropertiesBuilder

    def setup_method(self):
        self.data = Data()
        self.builder = ObserverPropertiesBuilder(self.data)

    @pytest.mark.parametrize("input,expected", [
        ([], ''),
        ([1], '1'),
        ([1, 2, 3], '1,2,3'),
        ([1.5, 2, 'abc'], '1.5,2,abc'),
    ])
    def test_format_list(self, input, expected):
        assert expected == ObserverPropertiesBuilder._format_list(input)

    @pytest.mark.parametrize("input,expected", [
        ('', []),
        ('1', [1]),
        ('1,2,3', [1, 2, 3]),
    ])
    def test_parse_list(self, input, expected):
        assert expected == ObserverPropertiesBuilder._parse_list(input, int)

    @pytest.mark.parametrize("input,expected", [
        ([], ''),
        ([1], '<1>'),
        ([1, 2, 3], '<1>,<2>,<3>'),
    ])
    def test_format_list_custom_format(self, input, expected):
        assert expected == ObserverPropertiesBuilder._format_list(input, lambda i: f'<{i}>')

    def test_auto_needs_instance_to_be_set(self):
        with raises(IllegalArgumentException, match=r'.*Instance not set.*'):
            builder = ObserverPropertiesBuilder()
            builder.auto("Test", Data.p_int)

    def test_auto_undefined_type(self):
        with raises(IllegalArgumentException, match=r'.*Property getter must be annotated*'):
            self.builder.auto("Test", Data.p_int_no_type)

    def test_auto_no_setter_means_readonly(self):
        self.data.p_float = 1.0
        item = self.builder.auto("Test", Data.p_float_no_setter)
        assert 1.0 == item.value
        assert ObserverPropertiesItemState.READ_ONLY == item.state
        item.callback('1.5')
        assert 1.0 == item.value

    def test_auto_int(self):
        self.data.p_int = 10
        item = self.builder.auto("Test", Data.p_int)
        assert 10 == item.value
        assert ObserverPropertiesItemType.NUMBER == item.type
        assert ObserverPropertiesItemState.ENABLED == item.state
        item.callback("20")
        assert 20 == self.data.p_int

    def test_auto_str(self):
        self.data.p_str = 'abc'
        item = self.builder.auto("Test", Data.p_str)
        assert 'abc' == item.value
        assert ObserverPropertiesItemType.TEXT == item.type
        assert ObserverPropertiesItemState.ENABLED == item.state
        item.callback("text")
        assert "text" == self.data.p_str

    def test_auto_float(self):
        self.data.p_float = -1.4
        item = self.builder.auto("Test", Data.p_float)
        assert -1.4 == item.value
        item.callback("-2.14")
        assert ObserverPropertiesItemType.NUMBER == item.type
        assert ObserverPropertiesItemState.ENABLED == item.state
        assert -2.14 == self.data.p_float

    def test_auto_bool(self):
        self.data.p_bool = True
        item = self.builder.auto("Test", Data.p_bool)
        assert True is item.value
        assert ObserverPropertiesItemType.CHECKBOX == item.type
        item.callback("False")
        assert False is self.data.p_bool

    def test_auto_list_int(self):
        self.data.p_int_list = [1, 2, 3]
        item = self.builder.auto("Test", Data.p_int_list)
        assert '1,2,3' == item.value
        assert ObserverPropertiesItemType.TEXT == item.type
        item.callback("5,6")
        assert [5, 6] == self.data.p_int_list

    @pytest.mark.parametrize('input_value', [
        "5,1.2,5",
        "5,abc,5",
        "abc"
    ])
    def test_auto_list_int_invalid_input(self, input_value):
        with raises(FailedValidationException, match="Expected List\[int\], syntax error:"):
            self.data.p_int_list = [1, 2, 3]
            item = self.builder.auto("Test", Data.p_int_list)
            item.callback(input_value)

    def test_optional_int(self):
        self.data.p_optional_int = None
        item = self.builder.auto("Test", Data.p_optional_int)
        assert None is item.value
        assert ObserverPropertiesItemType.NUMBER == item.type
        assert True is item.optional
        item.callback("1")
        assert 1 == self.data.p_optional_int

    def test_optional_list_int(self):
        self.data.p_optional_list_int = None
        item = self.builder.auto("Test", Data.p_optional_list_int)
        assert None is item.value
        assert ObserverPropertiesItemType.TEXT == item.type
        assert True is item.optional
        item.callback("1,2")
        assert [1, 2] == self.data.p_optional_list_int

    def test_enum(self):
        self.data.p_enum = Types.ONE
        item = self.builder.auto("Test", Data.p_enum)
        assert '0' == item.value
        assert ObserverPropertiesItemType.SELECT == item.type
        item.callback('1')
        assert Types.TWO == self.data.p_enum

    def test_tuple_int_int(self):
        self.data.p_tuple_int_int = (10, 20)
        item = self.builder.auto("Test", Data.p_tuple_int_int)
        assert '10,20' == item.value
        assert ObserverPropertiesItemType.TEXT == item.type
        item.callback('1,2')
        assert (1, 2) == self.data.p_tuple_int_int

    @pytest.mark.parametrize("param, exception", [
        ('1', 'Expected exactly 2 items, but 1 received'),
        ('1, 2, 3', 'Expected exactly 2 items, but 3 received')
    ])
    def test_tuple_int_int_invalid(self, param, exception):
        self.data.p_tuple_int_int = (10, 20)
        item = self.builder.auto("Test", Data.p_tuple_int_int)
        with raises(FailedValidationException, match=exception):
            item.callback(param)

    def test_tuple_float_float(self):
        self.data.p_tuple_float_float = (1.1, 1.2)
        item = self.builder.auto("Test", Data.p_tuple_float_float)
        assert '1.1,1.2' == item.value
        assert ObserverPropertiesItemType.TEXT == item.type
        item.callback('3.5,4.7')
        assert (3.5, 4.7) == self.data.p_tuple_float_float

    @pytest.mark.parametrize("param, exception", [
        ('1', 'Expected exactly 2 items, but 1 received'),
        ('1, 2, 3', 'Expected exactly 2 items, but 3 received')
    ])
    def test_tuple_float_float_invalid(self, param, exception):
        self.data.p_tuple_float_float = (1.1, 1.2)
        item = self.builder.auto("Test", Data.p_tuple_float_float)
        with raises(FailedValidationException, match=exception):
            item.callback(param)

    @pytest.mark.parametrize("state, enabled, result, should_pass", [
        (ObserverPropertiesItemState.ENABLED, None, ObserverPropertiesItemState.ENABLED, True),
        (ObserverPropertiesItemState.DISABLED, None, ObserverPropertiesItemState.DISABLED, True),
        (ObserverPropertiesItemState.READ_ONLY, None, ObserverPropertiesItemState.READ_ONLY, True),
        (None, True, ObserverPropertiesItemState.ENABLED, True),
        (None, False, ObserverPropertiesItemState.DISABLED, True),
        (None, None, ObserverPropertiesItemState.ENABLED, True),  # default is enabled
        (ObserverPropertiesItemState.ENABLED, False, None, False),
    ])
    def test_resolve_state(self, state, enabled, result, should_pass):
        if should_pass:
            assert result == ObserverPropertiesBuilder._resolve_state(state, enabled)
        else:
            with raises(IllegalArgumentException):
                ObserverPropertiesBuilder._resolve_state(state, enabled)

    @pytest.mark.parametrize("initializable, state, strategy, exp_state, exp_description", [
        (True, ObserverPropertiesItemState.ENABLED, enable_on_runtime, ObserverPropertiesItemState.ENABLED, None),
        (True, ObserverPropertiesItemState.DISABLED, enable_on_runtime, ObserverPropertiesItemState.DISABLED, None),
        (True, ObserverPropertiesItemState.READ_ONLY, enable_on_runtime, ObserverPropertiesItemState.READ_ONLY, None),
        (False, ObserverPropertiesItemState.ENABLED, enable_on_runtime, ObserverPropertiesItemState.DISABLED,
         "Item can be modified only when the simulation is running"),
        (False, ObserverPropertiesItemState.DISABLED, enable_on_runtime, ObserverPropertiesItemState.DISABLED,
         "Item can be modified only when the simulation is running"),
        (False, ObserverPropertiesItemState.ENABLED, disable_on_runtime, ObserverPropertiesItemState.ENABLED, None),
        (True, ObserverPropertiesItemState.ENABLED, disable_on_runtime, ObserverPropertiesItemState.DISABLED,
         "Item can be modified only when the simulation is stopped")
    ])
    def test_resolve_state_edit_strategy(self, initializable, state, strategy, exp_state, exp_description):
        builder = ObserverPropertiesBuilder(DataIsInitializable(initializable))
        res_state, res_description = builder._resolve_state_strategy(state, strategy)
        assert exp_state == res_state
        assert exp_description == res_description

    def test_resolve_state_edit_strategy_exception(self):
        builder = ObserverPropertiesBuilder(DataIsNotInitializable())
        with raises(IllegalArgumentException,
                    match=r'Expected instance of .*Initializable.* but .*DataIsNotInitializable.* received.'):
            builder._resolve_state_strategy(ObserverPropertiesItemState.ENABLED, enable_on_runtime)
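One caveat when extending a test class like this: Python keeps only the last definition of a same-named method, so a duplicated test name silently discards the earlier test before pytest ever collects it (hence the distinct _custom_format and _invalid suffixes above). A short demonstration:

class C:
    def test_x(self):
        return 1

    def test_x(self):  # rebinds the name: the first test_x is discarded
        return 2


assert C().test_x() == 2
assert [m for m in vars(C) if m.startswith('test_x')] == ['test_x']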
Example #19
class GradualLearningBasicTopology(Topology):
    """
    Long words utilizing context

    Interesting observers:
      gate:
        * SP Learn Process - Data Batch, sum over dim 1 (zero values mean the sequence is not present in the batch)
        * SP cluster centers
        * SP output forward clusters
      specialist:
        * SP_frequent_seqs_reconstruction - symbols reconstruction
        * TP_frequent_context_likelihood - show context for each symbol in learnt sequences (items per row: 2)
        * TP_seq_likelihoods_by_cluster
    """
    _n_accuracy_2: AccuracyNode
    _n_accuracy_1: AccuracyNode
    _n_accuracy_single_2: AccuracyNode
    _n_accuracy_single_1: AccuracyNode
    _n_dataset_switch: DatasetSwitchNodeGroup
    _n_specialist: SpecialistNodeGroup
    _prop_builder: ObserverPropertiesBuilder
    _step_count: int = 0
    _active_dataset: int = 0

    def __init__(self, params: GradualLearningBasicTopologyParams = GradualLearningBasicTopologyParams()):
        super().__init__('cuda')
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

        self._params = params
        self.create_topology()

    @property
    def params(self):
        return self._params

    def create_topology(self):
        """
                                        +----------------+
            +-------------+             | dataset_switch |
            |             |             +--+-----+-------+
            |             v                |     |
            |  +----------+------------+   |     |
            |  | context_feedback_pass |   |     |
            |  +--------------------+--+   |     |
            |                       |      |     |
            |                       v      v     |
            |               +-------+------+--+  |
            |               | gate_input_join |  |
            |               +-------+---------+  |
            |                       |            |
            |                       v            |
            |              +--------+---------+  |
            |              | gate_input_noise |  |
            |              +--------+---------+  |
            |                       |            |
            |                       v            |
            |                   +---+--+         |
            |                   | gate |         |
            |                   +---+--+         |
            |                       |            |
            |                       v            |
            |               +-------+--------+   +--------+
            |               | format_context |   |        |
            |               +-------+--------+   |        |
            |                       |            v        |
            |                       |     +------+-----+  |
            |                       ---->-+ specialist |  |
            |                             +--+--------++  |
            |                                |        |   |
            +--------------------------------+        v   v
                                                   ++--------++
                                                   | accuracy |
                                                   +----------+
        """

        n_gate = SpatialPoolerFlockNode(
            ExpertParams(flock_size=self._params.flock_size,
                         n_cluster_centers=self._params.seq_count,
                         spatial=SpatialPoolerParams(
                             # input_size=3,
                             enable_learning=True,
                             buffer_size=self._params.gate_buffer_size,
                             batch_size=100,
                             learning_rate=0.2,
                             learning_period=10,
                             cluster_boost_threshold=100,
                             max_boost_time=200
                         ),
                         ),
            name="Gate"
        )
        self.add_node(n_gate)

        # Specialist
        n_specialist = SpecialistNodeGroup(SpecialistNodeGroupParams(
            flock_size=self._params.flock_size,
            n_symbols=len(self._params.symbols),
            gate_input_context_multiplier=self._params.gate_input_context_multiplier,
            gate_input_context_avg_window_size=self._params.gate_input_context_avg_window_size,
            seq_count=self._params.seq_count,
            convert_context_to_one_hot=self._params.convert_context_to_one_hot
        ))
        self.add_node(n_specialist)
        self._n_specialist = n_specialist

        n_context_feedback_pass = PassNode((self._params.flock_size, self._params.seq_count))
        n_gate_input_join = JoinNode(dim=1, n_inputs=2)
        n_gate_input_noise = RandomNoiseNode(RandomNoiseParams(amplitude=0.0001))
        n_format_context = SPFormatContextNodeGroup(self._params.seq_count, self._params.flock_size)

        self.add_node(n_context_feedback_pass)
        self.add_node(n_gate_input_join)
        self.add_node(n_gate_input_noise)
        self.add_node(n_format_context)

        # Dataset
        n_dataset_switch = DatasetSwitchNodeGroup(DatasetSwitchNodeGroupParams(
            dataset_params=DatasetAlphabetNodeGroupParams(
                flock_size=self._params.flock_size,
                symbols=self._params.symbols,
                seq_length=self._params.seq_length,
                seq_count=self._params.seq_count,
                seq_repeat=self._params.seq_repeat
            ),
            flock_split=self._params.flock_split
        ))

        self._n_dataset_switch = n_dataset_switch
        self.add_node(n_dataset_switch)

        # dataset to specialist
        Connector.connect(n_dataset_switch.outputs.output, n_specialist.inputs.input)
        # specialist to gate
        Connector.connect(n_specialist.outputs.context_feedback, n_context_feedback_pass.inputs.input,
                          is_backward=True)
        Connector.connect(n_context_feedback_pass.outputs.output, n_gate_input_join.inputs[0])
        # dataset to gate
        Connector.connect(n_dataset_switch.outputs.sequence_id_one_hot, n_gate_input_join.inputs[1])
        Connector.connect(n_gate_input_join.outputs.output, n_gate_input_noise.inputs.input)
        Connector.connect(n_gate_input_noise.outputs.output, n_gate.inputs.sp.data_input)
        # gate to specialist
        Connector.connect(n_gate.outputs.sp.forward_clusters, n_format_context.inputs.input)
        Connector.connect(n_format_context.outputs.output, n_specialist.inputs.context_input)

        # Measuring accuracy
        # Fork
        n_fork_dataset = ForkNode(0, [self._params.flock_split, self._params.flock_size - self._params.flock_split])
        n_fork_prediction = ForkNode(0, [self._params.flock_split, self._params.flock_size - self._params.flock_split])
        self.add_node(n_fork_dataset)
        self.add_node(n_fork_prediction)
        Connector.connect(n_dataset_switch.outputs.output, n_fork_dataset.inputs.input)
        Connector.connect(n_specialist.outputs.output, n_fork_prediction.inputs.input)

        self._n_accuracy_single_1 = AccuracyNode(1, name='Accuracy single 1')
        self.add_node(self._n_accuracy_single_1)
        Connector.connect(n_fork_dataset.outputs[0], self._n_accuracy_single_1.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[0], self._n_accuracy_single_1.inputs.input_b)

        self._n_accuracy_single_2 = AccuracyNode(1, name='Accuracy single 2')
        self.add_node(self._n_accuracy_single_2)
        Connector.connect(n_fork_dataset.outputs[1], self._n_accuracy_single_2.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[1], self._n_accuracy_single_2.inputs.input_b)

        self._n_accuracy_1 = AccuracyNode(self._params.accuracy_average_steps, name='Accuracy 1')
        self.add_node(self._n_accuracy_1)
        Connector.connect(n_fork_dataset.outputs[0], self._n_accuracy_1.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[0], self._n_accuracy_1.inputs.input_b)

        self._n_accuracy_2 = AccuracyNode(self._params.accuracy_average_steps, name='Accuracy 2')
        self.add_node(self._n_accuracy_2)
        Connector.connect(n_fork_dataset.outputs[1], self._n_accuracy_2.inputs.input_a)
        Connector.connect(n_fork_prediction.outputs[1], self._n_accuracy_2.inputs.input_b)

    def init_sp_clusters(self):
        self._n_dataset_switch.init_sp_clusters()
        self._n_specialist.init_sp_clusters()

    def set_sequences_filter(self, dataset_id: int, enabled_sequences: List[bool]):
        self._n_dataset_switch.set_sequences_filter(dataset_id, enabled_sequences)
        logger.info(f'sequence filter: {enabled_sequences}, step: {self._step_count}')

    @property
    def active_dataset(self) -> int:
        return self._active_dataset

    @active_dataset.setter
    def active_dataset(self, value: int):
        self._active_dataset = value
        self._n_dataset_switch.select_dataset(value)
        logger.info(f'active dataset: {value}, step: {self._step_count}')

    def get_properties(self) -> List[ObserverPropertiesItem]:
        props = super().get_properties()
        return props + [
            self._prop_builder.collapsible_header('Experiment', True),
            self._prop_builder.auto("Active dataset", type(self).active_dataset),
            *self._dataset_control_buttons(0),
            *self._dataset_control_buttons(1)
        ]

    def _dataset_control_buttons(self, dataset_id: int) -> List[ObserverPropertiesItem]:
        patterns = [
            [False, False, False] * 2,
            [True, False, False] * 2,
            [False, True, False] * 2,
            [False, False, True] * 2,
            [True, True, False] * 2,
            [False, True, True] * 2,
            [True, False, True] * 2,
            [True, True, True] * 2,
            [True, True, True, True, True, False],
        ]

        def format_pattern(pattern: List[bool]) -> str:
            return "".join(['1' if p else '0' for p in pattern])

        return [
            self._prop_builder.button(f'Dataset {dataset_id} - {format_pattern(p)}',
                                      partial(self.set_sequences_filter, dataset_id, p))
            for p in patterns
        ]
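        # For illustration: each button freezes its (dataset_id, pattern) pair via
        # functools.partial, so the pattern [True, False, False] * 2 is rendered as
        # the label suffix '100100' and, when clicked, calls
        # set_sequences_filter(dataset_id, [True, False, False, True, False, False]).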

    def get_accuracy_single_1(self) -> float:
        return self._n_accuracy_single_1.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_single_1(self) -> List[float]:
        return self._n_accuracy_single_1.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_1(self) -> float:
        return self._n_accuracy_1.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_1(self) -> List[float]:
        return self._n_accuracy_1.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_single_2(self) -> float:
        return self._n_accuracy_single_2.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_single_2(self) -> List[float]:
        return self._n_accuracy_single_2.outputs.accuracy_per_flock.tensor.tolist()

    def get_accuracy_2(self) -> float:
        return self._n_accuracy_2.outputs.accuracy.tensor.item()

    def get_accuracy_per_flock_2(self) -> List[float]:
        return self._n_accuracy_2.outputs.accuracy_per_flock.tensor.tolist()

    def get_actual_sequence_ids(self) -> List[int]:
        return self._n_dataset_switch.outputs.dataset_2_scalar_sequence_ids.tensor.tolist()

    def step(self):
        super().step()
        self._step_count += 1
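
# Minimal usage sketch (an illustration, not part of the original example): drive
# the topology and read the averaged accuracy. Assumes the framework imports used
# by the class above and a CUDA device ('cuda' is hard-coded in __init__).
if __name__ == '__main__':
    topology = GradualLearningBasicTopology()
    topology.init_sp_clusters()
    topology.active_dataset = 0
    topology.set_sequences_filter(0, [True, False, False] * 2)
    for _ in range(1000):
        topology.step()
    print(topology.get_accuracy_1(), topology.get_accuracy_per_flock_1())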
def setup_method(self):
    self.data = Data()
    self.builder = ObserverPropertiesBuilder(self.data)
Beispiel #21
0
def __init__(self, tensor_provider: 'TensorProvider'):
    self._prop_builder = ObserverPropertiesBuilder(self)
    self._tensor_provider = tensor_provider

def test_parse_list(self, input, expected):
    assert expected == ObserverPropertiesBuilder._parse_list(input, int)
Beispiel #23
0
def __init__(self):
    self._prop_builder = ObserverPropertiesBuilder(self)
    self.tensor_view_projection = TensorViewProjection(is_buffer=False)

def test_format_list(self, input, expected):
    assert expected == ObserverPropertiesBuilder._format_list(input, lambda i: f'<{i}>')

def test_auto_needs_instance_to_be_set(self):
    with raises(IllegalArgumentException, match=r'.*Instance not set.*'):
        builder = ObserverPropertiesBuilder()
        builder.auto("Test", Data.p_int)

def test_resolve_state(self, state, enabled, result, should_pass):
    if should_pass:
        assert result == ObserverPropertiesBuilder._resolve_state(state, enabled)
    else:
        with raises(IllegalArgumentException):
            ObserverPropertiesBuilder._resolve_state(state, enabled)
Beispiel #27
0
class TensorViewProjection(ABC, PropertiesProvider):
    _real_shape: List[int]
    _shape: List[int]
    _items_per_row: int
    _min: float
    _max: float
    _sum_dim: Optional[int]
    _prop_builder: ObserverPropertiesBuilder

    @property
    def min(self) -> float:
        return self._min

    @min.setter
    def min(self, value: float):
        self._min = value

    @property
    def max(self) -> float:
        return self._max

    @max.setter
    def max(self, value: float):
        self._max = value

    @property
    def items_per_row(self) -> int:
        return self._items_per_row

    @items_per_row.setter
    def items_per_row(self, value: int):
        self._items_per_row = value

    @property
    def shape(self) -> List[int]:
        return self._shape

    @shape.setter
    def shape(self, value: List[int]):
        self._shape = value

    @property
    def real_shape(self) -> List[int]:
        return self._real_shape

    @property
    def sum_dim(self) -> Optional[int]:
        return self._sum_dim

    @sum_dim.setter
    def sum_dim(self, value: Optional[int]):
        validate_dimension_vs_shape(value, self._real_shape)
        self._sum_dim = value

    def __init__(self, is_buffer: bool):
        self._real_shape = []
        self._shape = []
        self._items_per_row = 1
        self._min = 0
        self._max = 1
        self._logger = logging.getLogger(
            f"{__name__}.Observer.{type(self).__name__}")
        self._is_buffer = is_buffer
        self._sum_dim = None
        self._prop_builder = ObserverPropertiesBuilder(self)

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.auto('Min',
                                    type(self).min),
            self._prop_builder.auto('Max',
                                    type(self).max),
            self._prop_builder.auto('Items per row',
                                    type(self).items_per_row),
            self._prop_builder.auto('Shape',
                                    type(self).shape),
            self._prop_builder.auto('Real shape',
                                    type(self).real_shape),
            self._prop_builder.auto('Sum over dim',
                                    type(self).sum_dim)
        ]

    def _compute_tile_dimensions(self, size: List[int], is_rgb: bool):
        if len(self._shape) >= 2:
            return self._shape[-2:]

        # no shape defined - use last two dimensions of the tensor
        if is_rgb:
            size = size[:-1]  # drop the channel dimension

        height, width = [1, 1] if len(size) < 2 else size[-2:]
        if self._is_buffer:
            # buffers have dimensions [flock_size, buffer_size, data]
            # height is set to 1 to cover just one buffer item in the tile
            height = 1
        return height, width
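        # Illustrative cases (derived from the branches above): with no explicit
        # shape set, a non-buffer tensor sized [10, 24, 32] resolves to (24, 32),
        # the same tensor in a buffer observer resolves to (1, 32), and an
        # explicit shape of [5, 8, 8] always resolves to (8, 8).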

    def transform_tensor(
            self, tensor: torch.Tensor,
            is_rgb: bool) -> Tuple[torch.Tensor, TensorViewProjectionUIParams]:

        tensor = self.apply_any_transforms(tensor)
        self._real_shape = list(tensor.shape)

        is_rgb_three_channels = is_rgb and (tensor.size()[-1] == 3)
        is_rgb_single_channel = is_rgb and (tensor.size()[-1] == 1)

        height, width = self._compute_tile_dimensions(
            list(tensor.size()),
            is_rgb_single_channel or is_rgb_three_channels)
        if not is_rgb_three_channels:
            tensor = tensor.contiguous().view([-1])
            tensor = self._colorize(tensor, self._min, self._max)
        else:
            tensor = self._rgb_transform(tensor, self._min, self._max)

        # Create column vector with all 2D images from top to bottom
        return self._compose_tensor_rgb(width, height, tensor)

    def apply_any_transforms(self, tensor: torch.Tensor) -> torch.Tensor:

        # Skip transforms on all-NaN tensors - the simulation has probably not started yet
        # noinspection PyUnresolvedReferences
        if self._sum_dim is not None and not torch.isnan(tensor).all().item():
            tensor = tensor.sum(self._sum_dim)
        return tensor
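        # e.g. with sum_dim == 1, a tensor shaped [flock_size, buffer_size, data]
        # is reduced to [flock_size, data] before display (illustrative shapes).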

    @staticmethod
    def _column_tiling_indices(device: str, count: int, width: int,
                               height: int):
        a = torch.arange(0, count, device=device)
        a = a.view(-1, width * height)
        i = torch.arange(0, width * height, device=device)
        i = i.view(width, height).transpose(0, 1).contiguous().view(-1)
        ri = a.index_select(1, i).view(-1)
        return ri

    @staticmethod
    def _compute_padding(value: int, divisor: int) -> int:
        """Compute how many elements have to be added so value is divisible by divisor.

        Args:
            value:
            divisor:

        Returns:
            Number of elements that needs to be added.
        """
        excess = value % divisor
        return 0 if excess == 0 else divisor - excess
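        # Worked examples: _compute_padding(10, 4) == 2 (10 + 2 = 12 is divisible
        # by 4) and _compute_padding(12, 4) == 0.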

    def _compose_tensor_rgb(self, width: int, height: int,
                            tensor: torch.Tensor):
        pad_color = torch.tensor([0.3, 0.3, 0.3],
                                 dtype=tensor.dtype,
                                 device=tensor.device)

        # Pad to fit width
        assert tensor.numel() % 3 == 0, 'Tensor should be RGB now'
        missing_values = self._compute_padding(math.ceil(tensor.numel() / 3),
                                               width)
        tensor = torch.cat(
            [tensor.view([-1, 3]),
             pad_color.expand([missing_values, 3])])

        column_tensor = tensor.view([-1, width, 3])
        count = math.ceil(column_tensor.size()[0] / height)

        # Pad the column tensor so it can be viewed as a canvas of
        # [ceil(count / items_per_row) * height, items_per_row * width, 3]
        column_height = column_tensor.size()[0]
        missing_image_rows = self._compute_padding(column_height, height)
        missing_images = self._compute_padding(count, self._items_per_row)

        pad_tensor = pad_color.expand(
            (missing_image_rows + missing_images * height, width, 3))

        padded_column_tensor = torch.cat([column_tensor,
                                          pad_tensor]).view([-1, width, 3])

        # Compute tiling indices
        image_rows = padded_column_tensor.size(0)
        indices = self._column_tiling_indices(str(column_tensor.device),
                                              image_rows, self._items_per_row,
                                              height)

        # Reorder tensor in order to make tiling
        images = padded_column_tensor.index_select(0, indices)
        return images.view([-1, self._items_per_row * width,
                            3]), TensorViewProjectionUIParams(
                                width, height, self._items_per_row, count)

    @staticmethod
    def _squash_all_dims_but_last(original_dims: List[int]) -> List[int]:
        """Collect all the dimensions but the last one.

        Intention: provide a 2D interpretation of the ND tensor for UI.
        """
        product = 1
        for dim in original_dims:
            product *= dim

        result = int(product / original_dims[-1])
        return [result, original_dims[-1]]
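        # e.g. [4, 5, 6] -> [20, 6]: the 3D tensor is presented as twenty rows
        # of six values.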

    @staticmethod
    def _colorize(data: torch.Tensor, minimum: float,
                  maximum: float) -> torch.Tensor:
        """Colorize data.

        Interval (-inf, -maximum) is clipped to value 1 - red color
        Interval (-maximum, -minimum) is scaled linearly to (1, 0) - red color
        Interval (-minimum, minimum) is clipped to value 0 -  black color
        Interval (minimum, maximum) is scaled linearly to (0, 1) - green color
        Interval (maximum, +inf) is clipped to value 1 - green color
        Value -inf is set to value 1 - magenta
        Value +inf is set to value 1 - cyan
        Value NaN is set to value 1 - blue
        """
        data = data.float()

        # define colors
        negative_color = torch.tensor([1.0, 0.0, 0.0], device=data.device)
        positive_color = torch.tensor([0.0, 1.0, 0.0], device=data.device)
        nan_color = torch.tensor([0.0, 0.0, 1.0], device=data.device)
        positive_inf_color = torch.tensor([0.0, 1.0, 1.0], device=data.device)
        negative_inf_color = torch.tensor([1.0, 0.0, 1.0], device=data.device)

        # prepare substitution masks
        mask_positive = data > minimum
        mask_negative = data < -minimum

        mask_positive_clip = data >= maximum
        mask_negative_clip = data <= -maximum
        mask_nan = torch.isnan(data)
        inf = float('inf')
        ninf = -float('inf')
        mask_positive_inf = data == inf
        mask_negative_inf = data == ninf

        # create result
        result_dims = data.size() + (3, )
        result = torch.zeros(result_dims, device=data.device)

        # linear scaling of negative values
        if mask_negative.any():
            zeros = torch.zeros(result_dims, device=data.device)
            zeros[mask_negative] = negative_color
            processed_data = (-data - minimum) / (maximum - minimum)
            result += zeros * processed_data.unsqueeze(data.dim())

        # linear scaling of positive values
        if mask_positive.any():
            zeros = torch.zeros(result_dims, device=data.device)
            zeros[mask_positive] = positive_color
            processed_data = (data - minimum) / (maximum - minimum)
            result += zeros * processed_data.unsqueeze(data.dim())

        # substitute fixed values
        color_substitutions = [
            (mask_positive_clip, positive_color),
            (mask_negative_clip, negative_color),
            (mask_nan, nan_color),
            (mask_positive_inf, positive_inf_color),
            (mask_negative_inf, negative_inf_color),
        ]
        for mask, color in color_substitutions:
            if mask.any():
                result[mask] = color

        return result
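        # Worked example (minimum=0.1, maximum=1.0): the value 0.55 is positive and
        # below the clip threshold, so it maps to (0.55 - 0.1) / (1.0 - 0.1) = 0.5
        # of the green channel, i.e. [0.0, 0.5, 0.0]; NaN maps to blue [0.0, 0.0, 1.0].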

    @staticmethod
    def _rgb_transform(data: torch.Tensor, minimum: float, maximum: float):
        data = data.float()
        data = (data - minimum) / (maximum - minimum)

        # prepare substitution masks
        mask_max_clip = data > 1.0
        mask_min_clip = data < 0.0

        # substitute fixed values
        color_substitutions = [
            (mask_max_clip, 1.0),
            (mask_min_clip, 0.0),
        ]
        for mask, color in color_substitutions:
            if mask.any():
                data[mask] = color

        return data
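        # e.g. with minimum=0.0 and maximum=2.0, channel values 0.0 / 1.0 / 3.0 map
        # to 0.0 / 0.5 / 1.0 (the last clipped to the valid range).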

    def _inverse_transform_coordinates(self, dims: List[int], x: int,
                                       y: int) -> int:
        height, width = self._compute_tile_dimensions(dims, False)
        tile_x = math.floor(x / width)
        tile_y = math.floor(y / height)
        tile_index = tile_y * self._items_per_row + tile_x
        pos_in_tile_x = x % width
        pos_in_tile_y = y % height

        row = tile_index * height + pos_in_tile_y
        column = pos_in_tile_x
        position = row * width + column
        return position

    def value_at(self, tensor: torch.Tensor, x: int, y: int) -> float:
        # TODO: We perform the transform twice per observation phase - make this more neat
        tensor = self.apply_any_transforms(tensor)

        position = self._inverse_transform_coordinates(list(tensor.size()), x,
                                                       y)
        if position >= tensor.numel():
            return float('nan')
        else:
            return tensor.view([-1])[position].item()
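
# Minimal usage sketch (an illustration, not part of the original example):
# project a random tensor into a tiled RGB image for display.
import torch

projection = TensorViewProjection(is_buffer=False)
projection.min, projection.max = 0.0, 1.0
projection.items_per_row = 4
image, ui_params = projection.transform_tensor(torch.rand(8, 6, 6), is_rgb=False)
# 'image' is a float tensor shaped [rows, items_per_row * width, 3], and
# 'ui_params' carries the tile width/height used by the UI.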
Beispiel #28
0
class SpatialPoolerParamsProps(Initializable, ABC):
    _flock: SPFlock
    _prop_builder: ObserverPropertiesBuilder
    _params: SpatialPoolerParams

    def __init__(self, params: SpatialPoolerParams, flock: SPFlock):
        self._flock = flock
        self._params = params
        self._prop_builder = ObserverPropertiesBuilder(self, source_type=ObserverPropertiesItemSourceType.MODEL)

    def is_initialized(self) -> bool:
        return self._flock is not None

    @property
    def input_size(self) -> int:
        return self._params.input_size

    @property
    def buffer_size(self) -> int:
        return self._params.buffer_size

    @buffer_size.setter
    def buffer_size(self, value: int):
        validate_positive_int(value)
        if value < self.batch_size:
            raise FailedValidationException('buffer_size must be greater than or equal to batch_size')
        self._params.buffer_size = value

    @property
    def batch_size(self) -> int:
        return self._params.batch_size

    @batch_size.setter
    def batch_size(self, value: int):
        validate_positive_int(value)
        if value > self.buffer_size:
            raise FailedValidationException('batch_size must be less than or equal to buffer_size')
        self._params.batch_size = value

    @property
    def learning_rate(self) -> float:
        return self._params.learning_rate

    @learning_rate.setter
    def learning_rate(self, value: float):
        validate_positive_float(value)
        self._params.learning_rate = value
        if self._flock is not None:
            self._flock.learning_rate = value

    @property
    def cluster_boost_threshold(self) -> int:
        return self._params.cluster_boost_threshold

    @cluster_boost_threshold.setter
    def cluster_boost_threshold(self, value: int):
        validate_positive_int(value)
        self._params.cluster_boost_threshold = value

    @property
    def max_boost_time(self) -> int:
        return self._params.max_boost_time

    @max_boost_time.setter
    def max_boost_time(self, value: int):
        validate_positive_int(value)
        self._params.max_boost_time = value

    @property
    def learning_period(self) -> int:
        return self._params.learning_period

    @learning_period.setter
    def learning_period(self, value: int):
        validate_positive_int(value)
        self._params.learning_period = value

    @property
    def enable_learning(self) -> bool:
        return self._params.enable_learning

    @enable_learning.setter
    def enable_learning(self, value: bool):
        self._params.enable_learning = value
        if self._flock is not None:
            self._flock.enable_learning = value

    def reset_cluster_centers(self):
        self._flock.initialize_cluster_centers()

    @property
    def boost(self) -> bool:
        return self._params.boost

    @boost.setter
    def boost(self, value: bool):
        self._params.boost = value

    @property
    def sampling_method(self) -> SamplingMethod:
        return self._params.sampling_method

    @sampling_method.setter
    def sampling_method(self, value: SamplingMethod):
        self._params.sampling_method = value

    def get_properties(self) -> List[ObserverPropertiesItem]:
        return [
            self._prop_builder.auto('SP_input_size', type(self).input_size, edit_strategy=disable_on_runtime,
                                    hint='Size of input vector for one expert'),
            self._prop_builder.auto('SP_buffer_size', type(self).buffer_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the SP buffer - how many of the most recent entries (steps) are stored'),
            self._prop_builder.auto('SP_batch_size', type(self).batch_size, edit_strategy=disable_on_runtime,
                                    hint='Size of the SP batch - it is sampled from the buffer'),
            self._prop_builder.auto('SP_learning_rate', type(self).learning_rate,
                                    hint='How much of the distance between the current position of the cluster center '
                                         'and its target position is removed in one learning process run'),
            self._prop_builder.auto('SP_enable_learning', type(self).enable_learning, hint='SP learning is enabled'),
            self._prop_builder.button('SP_reset_cluster_centers', self.reset_cluster_centers),
            #
            self._prop_builder.auto('SP_cluster_boost_threshold', type(self).cluster_boost_threshold,
                                    edit_strategy=disable_on_runtime,
                                    hint='If the cluster is without any datapoint for this many consecutive steps, '
                                         'the boosting starts'),
            self._prop_builder.auto('SP_max_boost_time', type(self).max_boost_time, edit_strategy=disable_on_runtime,
                                    hint='If any cluster is boosted for this many steps, the boosting targets are '
                                         'recomputed'),
            self._prop_builder.auto('SP_learning_period', type(self).learning_period, edit_strategy=disable_on_runtime,
                                    hint='How often the learning process runs - once every Xth SP forward pass'),
            self._prop_builder.auto('SP_boost', type(self).boost, edit_strategy=disable_on_runtime,
                                    hint='If false, the SP will not boost clusters which have no datapoints'),
            self._prop_builder.auto('SP_sampling_method', type(self).sampling_method, edit_strategy=disable_on_runtime,
                                    hint='<ul>'
                                         '<li>LAST_N - take last n entries from the buffer</li>'
                                         '<li>UNIFORM - sample uniformly from the whole buffer</li>'
                                         '<li>BALANCED - sample from the whole buffer so that the counts of points '
                                         'belonging to each cluster are approximately equal</li>'
                                         '</ul>'),
        ]
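
# Behavior sketch (an illustration, not from the original example; assumes
# SpatialPoolerParams() can be default-constructed with batch_size <= 200 and
# that no abstract members remain unimplemented): the paired setters keep
# batch_size <= buffer_size at all times.
props = SpatialPoolerParamsProps(SpatialPoolerParams(), flock=None)
props.buffer_size = 200       # accepted: 200 >= batch_size
try:
    props.batch_size = 500    # rejected: 500 > buffer_size
except FailedValidationException as e:
    print(e)  # batch_size must be less than or equal to buffer_size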