def compute(self, learn=True):
    self.context.assign_flat_concatenate([self.l4.active, self.l23.active])
    if self.l4_only:
        # Test L4 in isolation: disable feedback from L2/3 to L4.
        zeros_like_l23 = SDR(self.l23.active)
        zeros_like_l23.zero()
        self.context.assign_flat_concatenate([self.l4.active, zeros_like_l23])
    self.l4.compute()
    self.l23.compute()
    if learn:
        self.l4.learn()
        self.l23.learn()
class SpatialPooler:
    """
    This class handles the mini-column structures and the feed-forward
    proximal inputs to each cortical mini-column.

    Cui Y., Ahmad S. and Hawkins J. (2017) The HTM Spatial Pooler - A
    Neocortical Algorithm for Online Sparse Distributed Coding.
    Frontiers in Computational Neuroscience 11:111. doi: 10.3389/fncom.2017.00111

    Topology: This implements local inhibition with topology by creating many
    small groups of mini-columns which are distributed across the input space.
    All of the mini-columns in a group are located at the same place in the
    input space, and they inhibit each other equally.  Each group of mini-
    columns is self-contained; groups of mini-columns do not inhibit or
    interact with each other.  Instead of one large spatial pooler with
    internal topology, this creates many small spatial poolers which are
    arranged topologically across the input space.
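
    A minimal usage sketch (hypothetical parameter values; only this
    constructor's documented arguments and the SDR class are assumed):

        inp = SDR((1000,))
        sp  = SpatialPooler(
            input_sdr         = inp,
            mini_columns      = 200,
            sparsity          = 0.02,
            potential_pool    = 100,
            permanence_inc    = 0.04,
            permanence_dec    = 0.01,
            permanence_thresh = 0.2,)
        active_columns = sp.compute(input_sdr=inp, learn=True)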
    """
    def __init__(self,
        input_sdr,
        mini_columns,     # Integer,
        sparsity,
        potential_pool,
        permanence_inc,
        permanence_dec,
        permanence_thresh,
        segments            = 1,
        macro_columns       = (1,),
        init_dist           = (0, 0),
        boosting_alpha      = None,
        active_thresh       = 0,
        radii               = tuple()):
        """
        Argument mini_columns is an Integer, the number of mini-columns in each 
            macro-column.

        Argument macro_columns is a tuple of integers.  Dimensions of macro
            column array.  These are topological dimensions.  Macro columns are
            distributed across the input space in a uniform grid.

        Optional Argument radii defines the input topology.  Trailing extra
            input dimensions are treated as non-topological dimensions.

        Argument segments is an Integer, the number of proximal segments for
            each mini-column.

        Argument sparsity is the fraction of mini-columns which activate in
            each macro-column.

        Argument potential_pool is an Integer, the number of potential synapses
            in each proximal segment's potential pool.  This is a number of
            synapses, not a percentage.

        Optional Argument boosting_alpha is the small constant used by the
            exponential moving average which tracks each mini-column's
            activation frequency.  Default value is None, which disables
            boosting altogether.
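
        The boost multiplier applied in compute() is
            boost = log2(activation_frequency) / log2(target_sparsity)
        which equals 1 when a segment is at its target activation frequency,
        is greater than 1 for under-active segments, and is less than 1 for
        over-active segments.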

        Argument permanence_inc is the amount by which synapse permanences are
            incremented when learning.
        Argument permanence_dec is the amount by which synapse permanences are
            decremented when learning.
        Argument permanence_thresh is the permanence value at or above which a
            synapse is considered connected.
        Argument init_dist is (mean, std) of initial permanence values, which is a
                 gaussian random distribution.

        Argument active_thresh is the minimum excitement required for a
            mini-column to activate; mini-columns with sub-threshold excitement
            are filtered out of the winners.
        """
        assert(isinstance(input_sdr, SDR))
        assert(potential_pool > 1) # Number of synapses, not percent.
        self.mini_columns     = int(round(mini_columns))
        self.macro_columns    = tuple(int(round(dim)) for dim in macro_columns)
        self.radii            = radii
        self.segments         = int(round(segments))
        self.columns          = SDR(self.macro_columns + (self.mini_columns,),
            activation_frequency_alpha = boosting_alpha,
            average_overlap_alpha      = boosting_alpha,)
        self.sparsity         = sparsity
        self.active_thresh    = active_thresh
        self.potential_pool   = potential_pool
        self.age              = 0

        segment_shape = self.macro_columns + (self.mini_columns, self.segments)
        self.synapses = SynapseManager(
            input_sdr              = input_sdr,
            output_sdr             = SDR(segment_shape),
            radii                  = radii,
            init_dist              = init_dist,
            permanence_inc         = permanence_inc,
            permanence_dec         = permanence_dec,
            permanence_thresh      = permanence_thresh,
            initial_potential_pool = self.potential_pool,)

        if init_dist == (0, 0):
            # Nupic's SP init method
            # TODO: Make this a permanent part of the synapses class?  
            # Change init_dist argument to accept a string 'sp' ?
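            # Each synapse starts connected with probability 1/2.  Permanences
            # are drawn uniformly from [permanence_thresh, 1) for connected
            # synapses and from [0, permanence_thresh) for the rest.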
            for idx in range(self.synapses.output_sdr.size):
                pp = self.synapses.postsynaptic_permanences[idx].shape[0]
                connected   = np.random.random(pp) > .5
                permanences = np.random.random(pp)
                permanences[connected] *= 1 - self.synapses.permanence_thresh
                permanences[connected] += self.synapses.permanence_thresh
                permanences[np.logical_not(connected)] *= self.synapses.permanence_thresh
                self.synapses.postsynaptic_permanences[idx] = np.array(permanences, dtype=np.float32)
            self.synapses.rebuild_indexes()

        # Break ties randomly, in a constant unchanging manner.
        self.tie_breakers = np.random.uniform(0, .5, size=self.synapses.output_sdr.dimensions)

        self.boosting_alpha = boosting_alpha
        if boosting_alpha is not None:
            # Make a dedicated SDR to track segment activation frequencies for
            # boosting.
            self.boosting = SDR(self.synapses.output_sdr,
                                activation_frequency_alpha = boosting_alpha,
                                average_overlap_alpha      = boosting_alpha,)
            # Initialize to the target activation frequency/sparsity.
            self.boosting.activation_frequency.fill(self.sparsity / self.segments)

        self.reset()

    def reset(self):
        self.columns.zero()
        self.prev_updates = np.full(self.synapses.output_sdr.size, None)

    def compute(self, input_sdr=None, input_learning_sdr=None, learn=True):
        """
        """
        excitement, potential_excitement = self.synapses.compute(input_sdr=input_sdr)
        excitement = excitement + self.tie_breakers

        # Logarithmic Boosting Function.
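        # Segments which are under-active relative to the target frequency
        # receive a multiplier greater than 1; over-active segments receive a
        # multiplier less than 1.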
        if self.boosting_alpha is not None:
            target_sparsity = self.sparsity / self.segments
            boost = np.log2(self.boosting.activation_frequency) / np.log2(target_sparsity)
            boost = np.nan_to_num(boost)
            excitement *= boost

        # Divide excitement by the number of connected synapses.
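        # This normalizes away differences in receptive field size:
        # mini-columns compete on the fraction of their connected synapses
        # which are active, not on the raw count.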
        n_con_syns = self.synapses.postsynaptic_connected_count
        n_con_syns = n_con_syns.reshape(self.synapses.output_sdr.dimensions)
        percent_overlap = excitement / n_con_syns

        # Reduce the segment dimension to each mini-column's single most
        # excited segment.
        column_excitement = np.max(percent_overlap, axis=-1)

        # Stable SP and Grid Cells modify the excitement here.
        column_excitement = self._compute_hook(column_excitement)

        # Activate mini-columns.  First determine how many mini-columns to
        # activate in each macro-column.
        n_activate = max(1, int(round(self.mini_columns * self.sparsity)))

        # Activate the most excited mini-columns in each macro-column.
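        # np.argpartition moves the k smallest entries ahead of index k, so
        # the slice [..., k:] selects the indices of the n_activate most
        # excited mini-columns in each macro-column (in arbitrary order).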
        k = self.mini_columns - n_activate
        mini_index = np.argpartition(column_excitement, k, axis=-1)[..., k:]

        # Convert activations from mini-column indices to macro-column indices.
        macro_index    = tuple(np.indices(mini_index.shape))[:-1]
        winner_columns = tuple(x.reshape(-1) for x in macro_index + (mini_index,))
        # Filter out columns with sub-threshold excitement.
        winner_excitement = np.max(excitement[winner_columns], axis=-1)
        winner_columns    = tuple(np.compress(winner_excitement >= self.active_thresh,
                                      winner_columns, axis=1))

        # Output the results into the columns sdr.
        self.columns.index = winner_columns

        if learn:
            seg_idx = np.argmax(excitement[winner_columns], axis=-1)
            learning_segments = winner_columns + (seg_idx,)
            self.prev_updates = self.synapses.learn(
                input_sdr    = input_learning_sdr,
                output_sdr   = learning_segments,
                prev_updates = self.prev_updates,)

            # Update the exponential moving average of each segment's
            # activation frequency.
            if self.boosting_alpha is not None:
                self.boosting.assign(learning_segments)

            self.age += 1

        return self.columns

    def _compute_hook(self, x):
        """Subclasses override this method."""
        return x

    def statistics(self, _class_name='Spatial Pooler'):
        stats = _class_name + ' '
        stats += self.synapses.statistics()
        stats += 'Columns ' + self.columns.statistics()

        if self.boosting_alpha is not None:
            if self.segments > 1:
                stats  += 'Segments ' + self.boosting.statistics()
            af         = self.boosting.activation_frequency
            target     = self.sparsity / self.segments
            boost_min  = np.log2(np.min(af))  / np.log2(target)
            boost_mean = np.log2(np.mean(af)) / np.log2(target)
            boost_max  = np.log2(np.max(af))  / np.log2(target)
            stats += '\tLogarithmic Boosting Multiplier min/mean/max  {:-.04g}% / {:-.04g}% / {:-.04g}%\n'.format(
                    boost_min   * 100,
                    boost_mean  * 100,
                    boost_max   * 100,)
        return stats
class TemporalMemory:
    """
    This implementation is based on the paper: Hawkins J. and Ahmad S. (2016)
    Why Neurons Have Thousands of Synapses, a Theory of Sequence Memory in
    Neocortex. Frontiers in Neural Circuits 10:23 doi: 10.3389/fncir.2016.00023
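
    A minimal usage sketch (hypothetical parameter object; assumes the SDR and
    TemporalMemoryParameters classes from this library):

        columns = SDR((2048,))
        tm      = TemporalMemory(parameters=tm_params, column_sdr=columns)
        for sp_output in sequence:      # each item: an SDR of active columns
            tm.compute(column_sdr=sp_output)
            tm.learn()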
    """
    def __init__(self, 
        parameters,
        column_sdr,
        apical_sdr=None,
        inhibition_sdr=None,
        context_sdr=None,
        ):
        """
        Argument parameters is an instance of TemporalMemoryParameters.
        Argument column_sdr is an SDR which represents the active mini-columns.
        """
        assert(isinstance(parameters, TemporalMemoryParameters))
        self.args = args         = parameters
        assert(isinstance(column_sdr, SDR))
        self.columns             = column_sdr
        self.cells_per_column    = int(round(args.cells_per_column))
        if self.cells_per_column < 1:
            raise ValueError("Cannot create TemporalMemory with cells_per_column < 1.")
        self.segments_per_cell   = int(round(args.segments_per_cell))
        self.active              = SDR((self.columns.size, self.cells_per_column),
                                        activation_frequency_alpha = 1/1000,
                                        average_overlap_alpha      = 1/1000,)
        self.anomaly_alpha       = 1/1000
        self.mean_anomaly        = 0

        self.basal = Dendrite(
            input_sdr            = SDR(context_sdr if context_sdr is not None else self.active),
            active_sdr           = SDR(self.active),
            segments_per_cell    = args.segments_per_cell,
            synapses_per_segment = args.synapses_per_segment,
            initial_segment_size = args.initial_segment_size,
            add_synapses         = args.add_synapses,
            learning_threshold   = args.learning_threshold,
            predictive_threshold = args.predictive_threshold,
            permanence_inc       = args.permanence_inc,
            permanence_dec       = args.permanence_dec,
            permanence_thresh    = args.permanence_thresh,
            mispredict_dec       = args.mispredict_dec,)

        if apical_sdr is None:
            self.apical = None
        else:
            assert(isinstance(apical_sdr, SDR))
            self.apical = Dendrite(
                input_sdr            = apical_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                mispredict_dec       = args.mispredict_dec,)

        if inhibition_sdr is None:
            self.inhibition = None
        else:
            assert(isinstance(inhibition_sdr, SDR))
            self.inhibition = Dendrite(
                input_sdr            = inhibition_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                # Mispredict penalty disabled.  A mispredict here would be an
                # inhibited segment in an active cell; it is not currently
                # penalized, but arguably should be.
                mispredict_dec       = 0,)

        self.reset()

    def reset(self):
        self.active.zero()
        self.reset_state = True

    def compute(self,
        context_sdr=None,
        column_sdr=None,
        apical_sdr=None,
        inhibition_sdr=None,):
        """
        Attributes anomaly and mean_anomaly are the fraction of neuron
        activations which were not predicted.  Range [0, 1]
        """
        ########################################################################
        # PHASE 1:  Make predictions based on the previous timestep.
        ########################################################################
        if context_sdr is None:
            context_sdr = self.active
        basal_predictions = self.basal.compute(input_sdr=context_sdr)
        predictions       = basal_predictions

        if self.apical is not None:
            apical_predictions = self.apical.compute(input_sdr=apical_sdr)
            predictions        = np.logical_or(predictions, apical_predictions)

        # Inhibition cancels out predictions.  The technical term is
        # hyper-polarization.  Practically speaking, this is needed so that
        # inhibiting neurons can cause mini-columns to burst.
        if self.inhibition is not None:
            inhibited   = self.inhibition.compute(input_sdr=inhibition_sdr)
            predictions = np.logical_and(predictions, np.logical_not(inhibited))

        ########################################################################
        # PHASE 2:  Determine the currently active neurons.
        ########################################################################
        self.columns.assign(column_sdr)
        columns = self.columns.flat_index

        # Activate all neurons which are in a predictive state and in an active
        # column, unless they are inhibited by apical input.
        active_dense      = predictions[columns]
        col_num, neur_idx = np.nonzero(active_dense)
        # Recover the actual column indices, undoing the effect of discarding
        # the inactive columns before the nonzero operation.
        col_idx           = columns[col_num]
        predicted_active  = (col_idx, neur_idx)

        # If a column activates but was not predicted by any neuron segment,
        # then it bursts.  The bursting columns are the unpredicted columns.
        bursting_columns = np.setdiff1d(columns, col_idx)
        # All neurons in bursting columns activate.
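        # np.repeat and np.tile together enumerate every (column, cell) pair:
        # each bursting column index is repeated cells_per_column times and
        # paired with the full range of cell indices.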
        burst_col_idx  = np.repeat(bursting_columns, self.cells_per_column)
        burst_neur_idx = np.tile(np.arange(self.cells_per_column), len(bursting_columns))
        burst_active   = (burst_col_idx, burst_neur_idx)
        # Apply inhibition to the bursting mini-columns.
        if self.inhibition is not None:
            uninhibited_mask = np.logical_not(inhibited[burst_active])
            burst_active     = np.compress(uninhibited_mask, burst_active, axis=1)

        # TODO: Combined apical and basal predictions can cause L5 cells to
        # spontaneously activate.
        if False:
            volunteers = np.logical_and(self.basal_predictions, self.apical_predictions)
            volunteers = np.nonzero(volunteers.ravel())
            unique1d(volunteers, predicted_active+burst_active)

        self.active.index = tuple(np.concatenate([predicted_active, burst_active], axis=1))

        # Only tell the dendrite about active cells which are allowed to learn.
        bursting_learning = (
            bursting_columns,
            np.random.randint(0, self.cells_per_column, size=len(bursting_columns)))
        # TODO: This will NOT work for CONTEXT, TM ONLY.
        self.basal.input_sdr.assign(self.basal.active_sdr) # Only learn about the winner cells from last cycle.
        self.basal.active_sdr.index = tuple(np.concatenate([predicted_active, bursting_learning], axis=1))

        # Anomaly metric.
        self.anomaly      = np.array(burst_active).shape[1] / len(self.active)
        alpha             = self.anomaly_alpha
        self.mean_anomaly = (1-alpha)*self.mean_anomaly + alpha*self.anomaly

    def learn(self):
        """
        Learn about the previous to current timestep transition.
        """
        if self.reset_state:
            # Learning on the first timestep after a reset is not useful. The
            # issue is that waking up after a reset is inherently unpredictable.
            self.reset_state = False
            return

        # NOTE: All cells in a bursting mini-column will learn.  This includes
        # starting new segments if necessary.  This is different from Numenta's
        # TM, which chooses one cell to learn on a bursting column.  If all of
        # the newly created segments work correctly, then I may be destroying
        # any chance of learning a unique representation of the anomalous
        # sequence by assigning all cells to represent it.  I was thinking that
        # this might work anyway because the presynapses are chosen randomly,
        # but now it's evolved an initial segment size of 19!
        # FIXED?

        # Use the SDRs which were given during the compute phase.
        # inputs = previous winner cells, active = current winner cells
        self.basal.learn(active_sdr=None)
        if self.apical is not None:
            self.apical.learn(active_sdr=self.active)
        if self.inhibition is not None:
            self.inhibition.learn(active_sdr=self.active)

    def statistics(self):
        stats  = 'Temporal Memory\n'
        stats += 'Predictive Segments ' + self.basal.statistics()
        if self.apical is not None:
            stats += 'Apical Segments ' + self.apical.statistics()
        if self.inhibition is not None:
            stats += 'Inhibition Segments ' + self.inhibition.statistics()

        stats += "Mean anomaly %g\n"%self.mean_anomaly
        stats += 'Activation statistics ' + self.active.statistics()

        return stats