Example #1
 def __init__(
     self,
     parameters,
     cortex_sdr,
     size,
     radii=None,
 ):
     assert (radii is None)  # TODO: Striatum should allow topology.
     self.args = args = parameters
     self.size = size
     self.active = SDR((size, ), activation_frequency_alpha=0.005)
     self.synapses = Dendrite(
         input_sdr=cortex_sdr,
         active_sdr=self.active,
         segments_per_cell=args.segments_per_cell,
         synapses_per_segment=args.synapses_per_segment,
         predictive_threshold=args.predictive_threshold,
         learning_threshold=args.learning_threshold,
         permanence_thresh=args.permanence_thresh,
         permanence_inc=args.permanence_inc,
         permanence_dec=args.permanence_dec,
         mispredict_dec=args.mispredict_dec,
         add_synapses=args.add_synapses,
         initial_segment_size=args.initial_segment_size,
     )
 def compute(learn=True):
     context.assign_flat_concatenate([l4.active, l23.active])
     if self.l4_only:
         # Test L4 in isolation, Disable feedback from L2/3 to L4.
         zeros_like_l23 = SDR(l23.active)
         zeros_like_l23.zero()
         context.assign_flat_concatenate([l4.active, zeros_like_l23])
     l4.compute()
     l23.compute()
     if learn:
         l4.learn()
         l23.learn()
Example #3
    def __init__(self, cerebrum_parameters, region_parameters,
        input_sdr,
        context_sdr,
        apical_sdr,
        inhibition_sdr,):
        """
        Argument cerebrum_parameters is an instance of CerebrumParameters.
        Argument region_parameters is an instance of CorticalRegionParameters.
        Argument input_sdr ... feed forward
        Argument context_sdr ... all output layers, flat
        Argument apical_sdr ... from BG D1 cells
        Argument inhibition_sdr ... from BG D2 cells
        """
        assert(isinstance(cerebrum_parameters, CerebrumParameters))
        assert(isinstance(region_parameters, CorticalRegionParameters))
        self.cerebrum_parameters = cerebrum_parameters
        self.region_parameters   = region_parameters

        self.L6_sp = SpatialPooler( cerebrum_parameters.inp_sp,
                                    input_sdr  = input_sdr,
                                    column_sdr = SDR(region_parameters.inp_cols),
                                    radii      = region_parameters.inp_radii,)
        self.L6_tm = TemporalMemory(cerebrum_parameters.inp_tm,
                                    column_sdr  = self.L6_sp.columns,
                                    context_sdr = context_sdr,)

        self.L5_sp = SpatialPooler( cerebrum_parameters.out_sp,
                                    input_sdr   = self.L6_tm.active,
                                    column_sdr  = SDR(region_parameters.out_cols),
                                    radii       = region_parameters.out_radii,)
        self.L5_tm = TemporalMemory(cerebrum_parameters.out_tm,
                                    column_sdr     = self.L5_sp.columns,
                                    apical_sdr     = apical_sdr,
                                    inhibition_sdr = inhibition_sdr,)

        self.L4_sp = SpatialPooler( cerebrum_parameters.inp_sp,
                                    input_sdr   = input_sdr,
                                    column_sdr  = SDR(region_parameters.inp_cols),
                                    radii       = region_parameters.inp_radii,)
        self.L4_tm = TemporalMemory(cerebrum_parameters.inp_tm,
                                    column_sdr  = self.L4_sp.columns,
                                    context_sdr = context_sdr,)

        self.L23_sp = SpatialPooler( cerebrum_parameters.out_sp,
                                    input_sdr   = self.L4_tm.active,
                                    column_sdr  = SDR(region_parameters.out_cols),
                                    radii       = region_parameters.out_radii,)
        self.L23_tm = TemporalMemory(cerebrum_parameters.out_tm,
                                     column_sdr = self.L23_sp.columns)
 def __init__(
     self,
     n=100,
     sparsity=.30,
     module_periods=[6 * (2**.5)**i for i in range(5)],
 ):
     assert (min(module_periods) >= 4)
     self.n = n
     self.sparsity = sparsity
     self.module_periods = module_periods
     self.grid_cells = SDR((n, ))
     self.offsets = np.random.uniform(0,
                                      max(self.module_periods) * 9,
                                      size=(n, 2))
     module_partitions = np.linspace(0, n, num=len(module_periods) + 1)
     module_partitions = list(zip(module_partitions, module_partitions[1:]))
     self.module_partitions = [(int(round(start)), int(round(stop)))
                               for start, stop in module_partitions]
     self.scales = []
     self.angles = []
     self.rot_mats = []
     for period in module_periods:
         self.scales.append(period)
         angle = random.random() * 2 * math.pi
         self.angles.append(angle)
         c, s = math.cos(angle), math.sin(angle)
         R = np.array(((c, -s), (s, c)))
         self.rot_mats.append(R)
     self.reset()
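 # A hedged sketch, not the original encode(): one plausible way the offsets,
 # rotation matrices, and periods built above could map a 2-D position onto
 # grid cell activity.  Each module rotates the offset position, wraps it by
 # its period (a square lattice here, for simplicity), and the cells whose
 # wrapped phase lands nearest a grid vertex win a k-winners competition.
 def encode_sketch(self, position):
     position   = np.asarray(position, dtype=float)
     excitement = np.zeros(self.n)
     for (start, stop), period, R in zip(self.module_partitions, self.scales, self.rot_mats):
         displaced = self.offsets[start:stop] + position   # Per-cell offsets, shape (cells, 2).
         rotated   = displaced.dot(R.T)                    # Rotate into this module's orientation.
         phase     = (rotated % period) / period           # Wrap each axis into [0, 1).
         dist      = np.linalg.norm(np.minimum(phase, 1 - phase), axis=1)
         excitement[start:stop] = -dist                    # Nearer a vertex => more excited.
     k = max(1, int(round(self.n * self.sparsity)))
     self.grid_cells.flat_index = np.argpartition(-excitement, k)[:k]
     return self.grid_cells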
Example #5
    def __init__(self, username, password, config_dir, debug=False):
        self.debug_mode = debug

        # TODO - for now, only supporting a single user. if needed, this can be
        # subsumed by a dictionary to support multiple users.
        self.username = username
        self.password = password

        # generate the 16-byte system GUID
        self.system_guid = [random.randint(0, 255) for _ in range(16)]

        # set the initial configurations for the mock bmc
        self._bmc_cfg = self._load_config(config_dir)
        self.channel_auth_capabilities = self._encode_channel_auth_capabilities()
        self.device = self._encode_bmc_device()
        self.chassis = self._encode_bmc_chassis()
        self.capabilities = self._encode_bmc_capabilities()
        self.dcmi = self._encode_dcmi_capabilities()

        # get an instance of an SDR for the BMC
        self.sdr = SDR.from_config(os.path.join(config_dir, 'sdr.json'))
        self.sdr.add_entries_from_config(
            os.path.join(config_dir, 'sdr_entries.json'))

        # get an instance of a FRU for the BMC
        self.fru = FRU.from_config(os.path.join(config_dir, 'fru.json'))

        # map temporary session ids to their challenge
        self._tmp_session = {}

        # maintain a map of active sessions for tracking purposes
        self._active_sessions = {}
Example #6
 def __init__(self, bits, sparsity, diag=True):
     self.bits         = int(round(bits))
     self.sparsity     = sparsity
     self.on_bits      = int(round(self.bits * self.sparsity))
     self.output_sdr   = SDR((self.bits,))
     if diag:
         print("Enum Encoder: %d bits %.2g%% sparsity"%(bits, 100*sparsity))
Example #7
 def __init__(self, parameters, striatum_sdr):
     """
     """
     assert (isinstance(parameters, GlobusPallidusParameters))
     self.args = args = parameters
     self.striatum = striatum_sdr
     self.num_neurons = int(round(args.num_neurons))
     self.active = SDR((self.num_neurons, ))
     self.synapses = WeightedSynapseManager(
         input_sdr=self.striatum,
         output_sdr=SDR((
             self.num_neurons,
             int(round(args.segments_per_cell)),
         )),
         permanence_thresh=.5,  # Unused parameter.
         permanence_inc=args.permanence_inc,
         permanence_dec=args.permanence_dec,
     )
Example #8
 def __init__(self, parameters, input_sdr, output_shape, output_type):
     """
     Argument parameters must be an instance of SDRC_Parameters.
     Argument output_type must be one of: 'index', 'bool', 'pdf'
     """
     self.args = parameters
     self.input_sdr = SDR(
         input_sdr
     )  # EEK! This copies the argument's current value instead of saving a reference to the argument.
     self.output_shape = tuple(output_shape)
     self.output_type = output_type
     assert (self.output_type in ('index', 'bool', 'pdf'))
     # Don't initialize to zero, touch every input+output pair once or twice.
     self.stats = np.random.uniform(0,
                                    5 * self.args.alpha,
                                    size=(self.input_sdr.size, ) +
                                    self.output_shape)
     self.age = 0
Example #9
class StriatumPopulation:
    """
    This class models the D1 and the D2 populations of neurons in the striatum of
    the basal ganglia.
    """
    def __init__(
        self,
        parameters,
        cortex_sdr,
        size,
        radii=None,
    ):
        assert (radii is None)  # TODO: Striatum should allow topology.
        self.args = args = parameters
        self.size = size
        self.active = SDR((size, ), activation_frequency_alpha=0.005)
        self.synapses = Dendrite(
            input_sdr=cortex_sdr,
            active_sdr=self.active,
            segments_per_cell=args.segments_per_cell,
            synapses_per_segment=args.synapses_per_segment,
            predictive_threshold=args.predictive_threshold,
            learning_threshold=args.learning_threshold,
            permanence_thresh=args.permanence_thresh,
            permanence_inc=args.permanence_inc,
            permanence_dec=args.permanence_dec,
            mispredict_dec=args.mispredict_dec,
            add_synapses=args.add_synapses,
            initial_segment_size=args.initial_segment_size,
        )

    def compute(self, cortex_sdr=None):
        self.excitement = self.synapses.compute(input_sdr=cortex_sdr)
        self.excitement = self.excitement + np.random.uniform(
            0, .5, size=self.size)
        k = max(1, int(round(self.args.sparsity * self.size)))
        self.active.flat_index = np.argpartition(-self.excitement, k)[:k]
        return self.active

    def learn(self):
        """Caller must gate this method on the sign of the world current value."""
        self.synapses.learn()

    def copy(self):
        cpy = copy.copy(self)
        cpy.synapses = self.synapses.copy()
        cpy.active = cpy.synapses.active_sdr
        return cpy

    def statistics(self):
        s = self.synapses.statistics()
        s += 'Active ' + self.active.statistics()
        return s
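# A hedged usage sketch (the parameters, cortex_sdr, and reward names are
# illustrative, not from the source): keep separate D1 and D2 populations and
# gate learn() on the sign of the reward, as learn()'s docstring requires.
d1 = StriatumPopulation(parameters, cortex_sdr, size=1000)
d2 = StriatumPopulation(parameters, cortex_sdr, size=1000)
d1.compute(cortex_sdr)
d2.compute(cortex_sdr)
reward = get_reward()    # Hypothetical reward signal from the environment.
if reward > 0:
    d1.learn()           # The D1 population learns from positive outcomes.
elif reward < 0:
    d2.learn()           # The D2 population learns from negative outcomes.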
Example #10
 def measure_catagories():
     # Compute every sensation for every object.
     objects_columns = []
     for obj in objects:
         objects_columns.append([])
         for sensation in obj:
             sp.reset()
             enc.encode(sensation)
             sp.compute(learn=False)
             objects_columns[-1].append(SDR(sp.columns))
     sp.reset()
     return objects_columns
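 # A hedged follow-up sketch (an assumption about how the result could be
 # used): compare the stored column SDRs by overlap, within and across
 # objects, to check whether sensations of the same object map onto more
 # similar mini-column codes than sensations of different objects.
 def sdr_overlap(sdr_a, sdr_b):
     return len(np.intersect1d(sdr_a.flat_index, sdr_b.flat_index))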
Example #11
    def __init__(self, parameters):
        """
        Attribute optic_sdr ... retina's output
        Attribute rgb ... The most recent view, kept as an attribute.

        Attribute position     (X, Y) coords of eye within image, Read/Writable
        Attribute orientation  ... units are radians, Read/Writable
        Attribute scale        ... Read/Writable

        Private Attributes:
            self.eye_coords.shape = (2, view-x, view-y)
            self.eye_coords[input-dim, output-coordinate] = input-coordinate
        """
        self.args = args    = parameters
        self.eye_dimensions = tuple(int(round(ed)) for ed in args.eye_dimensions)
        self.eye_coords     = EyeSensor.complex_eye_coords(self.eye_dimensions,
                                        args.fovea_param_1, args.fovea_param_2)
        self.hue_encoder = ChannelEncoder(  input_shape = self.eye_dimensions,
                                            num_samples = args.num_samples,
                                            sparsity    = args.hue_sparsity,
                                            dtype       = np.float32,
                                            drange      = range(0,360),
                                            wrap        = True,)
        self.sat_encoder = ChannelEncoder(  input_shape = self.eye_dimensions,
                                            num_samples = args.num_samples,
                                            sparsity    = args.sat_sparsity,
                                            dtype       = np.float32,
                                            drange      = (0, 1),
                                            wrap        = False,)
        self.val_encoder = ChannelEncoder(  input_shape = self.eye_dimensions,
                                            num_samples = args.num_samples,
                                            sparsity    = args.val_sparsity,
                                            dtype       = np.float32,
                                            drange      = (0, 1),
                                            wrap        = False,)
        self.edge_encoder = ChannelThresholder(args.edge_encoder,
                                            input_shape = self.eye_dimensions,
                                            dtype       = np.float32,
                                            drange      = (-math.pi, math.pi),
                                            wrap        = True)

        depth = self.hue_encoder.output_shape[-1] + self.edge_encoder.output_shape[-1]
        self.optic_sdr = SDR(self.eye_dimensions + (depth,))
Example #12
    def __init__(self, cerebrum_parameters, region_parameters, input_sdrs):
        self.cerebrum_parameters = cerebrum_parameters
        self.region_parameters   = tuple(region_parameters)
        self.inputs              = tuple(input_sdrs)
        self.age                 = 0
        assert(isinstance(cerebrum_parameters, CerebrumParameters))
        assert(all(isinstance(rgn, CorticalRegionParameters) for rgn in self.region_parameters))
        assert(len(region_parameters) == len(self.inputs))
        assert(all(isinstance(inp, SDR) for inp in self.inputs))

        # The size of the cortex needs to be known before it can be constructed.
        context_size     = 0
        self.apical_sdrs = []
        for rgn_args in self.region_parameters:
            num_cols  = np.prod([int(round(dim)) for dim in rgn_args.out_cols])
            cells_per = int(round(cerebrum_parameters.out_tm.cells_per_column))
            context_size += num_cols * cells_per * 2
            L5_dims      = (num_cols * cells_per,)
            self.apical_sdrs.append((SDR(L5_dims), SDR(L5_dims)))
        self.L23_activity  = SDR((context_size // 2,))
        self.L5_activity   = SDR((context_size // 2,))
        self.context_sdr   = SDR((context_size,))
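        # For illustration (the numbers are assumptions, not from the source):
        # a region whose out_cols yield 100 mini-columns with cells_per_column
        # of 8 contributes 100 * 8 * 2 = 1600 bits to context_size, one half
        # for its L2/3 activity and one half for its L5 activity, which is why
        # L23_activity and L5_activity are each context_size // 2 long.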

        # Construct the Basal Ganglia
        self.basal_ganglia = BasalGanglia(cerebrum_parameters.bg,
                                          input_sdr  = self.context_sdr,
                                          output_sdr = self.L5_activity,)

        # Construct the cortex.
        self.regions = []
        for rgn_args, inp, apical in zip(self.region_parameters, input_sdrs, self.apical_sdrs):
            rgn = CorticalRegion(cerebrum_parameters, rgn_args,
                                 input_sdr      = inp,
                                 context_sdr    = self.context_sdr,
                                 apical_sdr     = self.basal_ganglia.d1.active,
                                 inhibition_sdr = self.basal_ganglia.d2.active,)
            self.regions.append(rgn)

        # Construct the motor controls.
        pass
Example #13
    def make_control_vectors(num_cv, pos_stddev, angle_stddev, scale_stddev):
        """
        Argument num_cv is the approximate number of control vectors to create
        Arguments pos_stddev, angle_stddev, and scale_stddev are the standard
                  deviations of the controls' effects on position, angle, and
                  scale.

        Returns pair of control_vectors, control_sdr

        The control_vectors determine what happens for each output. Each
        control is a 4-tuple of (X, Y, Angle, Scale) movements. To move,
        active controls are summed and applied to the current location.
        control_sdr contains the shape of the control_vectors.
        """
        cv_sz = int(round(num_cv // 6))
        control_shape = (6 * cv_sz, )

        pos_controls = [(random.gauss(0, pos_stddev),
                         random.gauss(0, pos_stddev), 0, 0)
                        for i in range(4 * cv_sz)]

        angle_controls = [(0, 0, random.gauss(0, angle_stddev), 0)
                          for angle_control in range(cv_sz)]

        scale_controls = [(0, 0, 0, random.gauss(0, scale_stddev))
                          for scale_control in range(cv_sz)]

        control_vectors = pos_controls + angle_controls + scale_controls
        random.shuffle(control_vectors)
        control_vectors = np.array(control_vectors)

        # Add a little noise to all control vectors
        control_vectors[:, 0] += np.random.normal(0, pos_stddev / 10,
                                                  control_shape)
        control_vectors[:, 1] += np.random.normal(0, pos_stddev / 10,
                                                  control_shape)
        control_vectors[:, 2] += np.random.normal(0, angle_stddev / 10,
                                                  control_shape)
        control_vectors[:, 3] += np.random.normal(0, scale_stddev / 10,
                                                  control_shape)
        return control_vectors, SDR(control_shape)
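# A hedged usage sketch based on the docstring above (the starting pose, the
# active control bits, and calling make_control_vectors as a plain function
# are illustrative): the active bits of control_sdr select rows of
# control_vectors, which are summed and applied to the current pose.
control_vectors, control_sdr = make_control_vectors(
    num_cv=600, pos_stddev=1.0, angle_stddev=0.1, scale_stddev=0.05)
x, y, angle, scale = 0.0, 0.0, 0.0, 1.0              # Illustrative starting pose.
control_sdr.flat_index = np.array([3, 77, 512])      # Currently active controls.
dx, dy, dangle, dscale = np.sum(control_vectors[control_sdr.flat_index], axis=0)
x, y, angle, scale = x + dx, y + dy, angle + dangle, scale + dscale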
Example #14
    def __init__(self, parameters, eye_sensor):
        """
        Attribute control_sdr ... eye movement input controls
        Attribute motor_sdr ... internal motor sensor output

        Attribute gaze is a list of tuples of (X, Y, Orientation, Scale)
                  History of recent movements, self.move() updates this.
                  This is cleared by the following methods:
                      self.new_image() 
                      self.center_view()
                      self.randomize_view()
        """
        assert(isinstance(parameters, EyeControllerParameters))
        assert(isinstance(eye_sensor, EyeSensor))
        self.args = args = parameters
        self.eye_sensor  = eye_sensor
        self.control_vectors, self.control_sdr = self.make_control_vectors(
                num_cv       = args.num_cv,
                pos_stddev   = args.pos_stddev,
                angle_stddev = args.angle_stddev,
                scale_stddev = args.scale_stddev,)

        self.motor_position_encoder         = RandomDistributedScalarEncoder(args.position_encoder)
        self.motor_angle_encoder            = RandomDistributedScalarEncoder(args.angle_encoder)
        self.motor_scale_encoder            = RandomDistributedScalarEncoder(args.scale_encoder)
        self.motor_velocity_encoder         = RandomDistributedScalarEncoder(args.velocity_encoder)
        self.motor_angular_velocity_encoder = RandomDistributedScalarEncoder(args.angular_velocity_encoder)
        self.motor_scale_velocity_encoder   = RandomDistributedScalarEncoder(args.scale_velocity_encoder)
        self.motor_encoders = [ self.motor_position_encoder,    # X Position
                                self.motor_position_encoder,    # Y Position
                                self.motor_angle_encoder,
                                self.motor_scale_encoder,
                                self.motor_velocity_encoder,    # X Velocity
                                self.motor_velocity_encoder,    # Y Velocity
                                self.motor_angular_velocity_encoder,
                                self.motor_scale_velocity_encoder,]
        self.motor_sdr = SDR((sum(enc.output.size for enc in self.motor_encoders),))
        self.gaze = []
Example #15
class SDR_Classifier:
    """Maximum Likelyhood classifier for SDRs."""
    def __init__(self, parameters, input_sdr, output_shape, output_type):
        """
        Argument parameters must be an instance of SDRC_Parameters.
        Argument output_type must be one of: 'index', 'bool', 'pdf'
        """
        self.args = parameters
        self.input_sdr = SDR(
            input_sdr
        )  # EEK! This copies the argument's current value instead of saving a reference to the argument.
        self.output_shape = tuple(output_shape)
        self.output_type = output_type
        assert (self.output_type in ('index', 'bool', 'pdf'))
        # Don't initialize to zero, touch every input+output pair once or twice.
        self.stats = np.random.uniform(0,
                                       5 * self.args.alpha,
                                       size=(self.input_sdr.size, ) +
                                       self.output_shape)
        self.age = 0

    def train(self, input_sdr, out):
        """
        Argument input_sdr is a tuple of index arrays, as output from an SP's or TP's compute method:
        input_sdr = (ndarray of input space dim 0 indexes, ndarray of input space dim 1 indexes, ...)
        """
        self.input_sdr.assign(input_sdr)
        inp = self.input_sdr.flat_index
        alpha = self.args.alpha
        self.stats[inp] *= (1 - alpha)  # Decay
        # Update.
        if self.output_type == 'index':
            # try:
            for out_idx in out:
                self.stats[inp, out_idx] += alpha
            # except TypeError:
            #     self.stats[inp + out] += alpha

        if self.output_type == 'bool':
            self.stats[inp, out] += alpha

        if self.output_type == 'pdf':
            updates = (out - self.stats[inp]) * alpha
            self.stats[inp] += updates

        self.age += 1

    def predict(self, input_sdr=None):
        """
        Argument input_sdr is an ndarray of indexes into the input space.
        Returns the probability of each category in the output space.
        """
        self.input_sdr.assign(input_sdr)
        pdf = self.stats[self.input_sdr.flat_index]
        if True:
            # Combine multiple probabilities into a single pdf. Product, not
            # summation, to combine probabilities of independent events. The
            # problem with this is that if a few unexpected bits turn on, it
            # multiplies the result by zero, and the test dataset is going to
            # have unexpected things in it.
            return np.prod(pdf, axis=0, keepdims=False)
        else:
            # Use summation B/C it works well.
            return np.sum(pdf, axis=0, keepdims=False)

    def __str__(self):
        s = "SDR Classifier alpha %g\n" % self.args.alpha
        s += "\tInput -> Output shapes are", self.input_shape, '->', self.output_shape
        return s
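# A hedged usage sketch (the SDRC_Parameters arguments, num_labels, label, and
# the use of tm.active as the input SDR are illustrative): train on pairs of
# (input SDR, label index), then read the most likely category from predict().
sdrc = SDR_Classifier(SDRC_Parameters(alpha=0.001),
                      input_sdr    = tm.active,
                      output_shape = (num_labels,),
                      output_type  = 'index')
sdrc.train(tm.active, (label,))
pdf = sdrc.predict(tm.active)
print("Predicted label:", np.argmax(pdf))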
Example #16
def main(parameters=default_parameters, argv=None, verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--time', type=int, default=5,
                        help='Number of times to run through the training data.')
    parser.add_argument('--dataset', choices=('states', 'dictionary'), default='states')
    args = parser.parse_args(args = argv)

    # Load data.
    if args.dataset == 'states':
        dataset = state_names
        if verbose:
            print("Dataset is %d state names"%len(dataset))
    elif args.dataset == 'dictionary':
        dataset = read_dictionary()
        dataset = random.sample(dataset, 500)
        if verbose:
            print("Dataset is dictionary words, sample size %d"%len(dataset))

    dataset   = sorted(dataset)
    word_ids  = {word: idx for idx, word in enumerate(sorted(dataset))}
    confusion = np.zeros((len(dataset), len(dataset)))
    if verbose:
        print("Dataset: " + ", ".join('%d) %s'%idx_word for idx_word in enumerate(dataset)))

    # Construct TM.
    diagnostics_alpha = parameters['sp']['boosting_alpha']
    enc = EnumEncoder(**parameters['enc'])
    enc.output_sdr = SDR(enc.output_sdr, average_overlap_alpha = diagnostics_alpha)
    sp = SpatialPooler(
        input_sdr         = enc.output_sdr,
        **parameters['sp'])
    tm = TemporalMemory(
        column_sdr        = sp.columns,
        anomaly_alpha     = diagnostics_alpha,
        **parameters['tm'])
    sdrc = SDRClassifier(steps=[0], **parameters['tm_sdrc'])
    sdrc.compute(-1, [tm.active.size-1],    # Initialize the table.
        classification={"bucketIdx": [len(dataset)-1], "actValue": [len(dataset)-1]},
        learn=True, infer=False)

    def reset():
        enc.output_sdr.zero()
        sp.reset()
        tm.reset()

    # Train.
    if verbose:
        train_cycles = args.time * sum(len(w) for w in dataset)
        print("Training for %d cycles (%d dataset iterations)"%(train_cycles, args.time))
    for i in range(args.time):
        random.shuffle(dataset)
        for word in dataset:
            reset()
            for idx, char in enumerate(word):
                enc.encode(char)
                sp.compute()
                tm.compute()
            lbl = word_ids[word]
            sdrc.compute(tm.age, tm.learning.flat_index,
                classification={"bucketIdx": lbl, "actValue": lbl},
                learn=True, infer=False)

    if verbose:
        print("Encoder", enc.output_sdr.statistics())
        print(sp.statistics())
        print(tm.statistics())

    # Test.
    score = 0.
    score_samples = 0
    for word in dataset:
        reset()
        for idx, char in enumerate(word):
            enc.encode(char)
            sp.compute(learn = False)
            tm.compute(learn = False)

        inference = sdrc.infer(tm.active.flat_index, None)
        lbl = word_ids[word]
        if lbl == np.argmax(inference[0]):
            score += 1
        score_samples += 1
        confusion[lbl] += inference[0]
    print("Score:", 100. * score / score_samples, '%')

    if synapses_debug:
        tm.synapses.check_data_integrity()
        print("Synapse data structure integrity is OK.")

    if verbose:
        import matplotlib.pyplot as plt
        plt.figure('Confusion Matrix')
        plt.imshow(confusion, interpolation='nearest')
        plt.xlabel('Prediction')
        plt.ylabel('Label')
        plt.show()

    return score / score_samples
Example #17
class TemporalMemory:
    """
    This implementation is based on the paper: Hawkins J. and Ahmad S. (2016)
    Why Neurons Have Thousands of Synapses, a Theory of Sequence Memory in
    Neocortex. Frontiers in Neural Circuits 10:23 doi: 10.3389/fncir.2016.00023
    """
    def __init__(self, 
        parameters,
        column_sdr,
        apical_sdr=None,
        inhibition_sdr=None,
        context_sdr=None,
        ):
        """
        Argument parameters is an instance of TemporalMemoryParameters
        Argument column_sdr ...
        """
        assert(isinstance(parameters, TemporalMemoryParameters))
        self.args = args         = parameters
        assert(isinstance(column_sdr, SDR))
        self.columns             = column_sdr
        self.cells_per_column    = int(round(args.cells_per_column))
        if self.cells_per_column < 1:
            raise ValueError("Cannot create TemporalMemory with cells_per_column < 1.")
        self.segments_per_cell   = int(round(args.segments_per_cell))
        self.active              = SDR((self.columns.size, self.cells_per_column),
                                        activation_frequency_alpha = 1/1000,
                                        average_overlap_alpha      = 1/1000,)
        self.anomaly_alpha       = 1/1000
        self.mean_anomaly        = 0

        self.basal = Dendrite(
            input_sdr            = SDR(context_sdr if context_sdr is not None else self.active),
            active_sdr           = SDR(self.active),
            segments_per_cell    = args.segments_per_cell,
            synapses_per_segment = args.synapses_per_segment,
            initial_segment_size = args.initial_segment_size,
            add_synapses         = args.add_synapses,
            learning_threshold   = args.learning_threshold,
            predictive_threshold = args.predictive_threshold,
            permanence_inc       = args.permanence_inc,
            permanence_dec       = args.permanence_dec,
            permanence_thresh    = args.permanence_thresh,
            mispredict_dec       = args.mispredict_dec,)

        if apical_sdr is None:
            self.apical = None
        else:
            assert(isinstance(apical_sdr, SDR))
            self.apical = Dendrite(
                input_sdr            = apical_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                mispredict_dec       = args.mispredict_dec,)

        if inhibition_sdr is None:
            self.inhibition = None
        else:
            assert(isinstance(inhibition_sdr, SDR))
            self.inhibition = Dendrite(
                input_sdr            = inhibition_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                mispredict_dec       = 0,) # An inhibition segment on a cell which activates anyway is not penalized here, though arguably it should be.

        self.reset()

    def reset(self):
        self.active.zero()
        self.reset_state = True

    def compute(self,
        context_sdr=None,
        column_sdr=None,
        apical_sdr=None,
        inhibition_sdr=None,):
        """
        Attributes anomaly and mean_anomaly are the fraction of neuron
                  activations which were not predicted.  Range [0, 1]
        """
        ########################################################################
        # PHASE 1:  Make predictions based on the previous timestep.
        ########################################################################
        if context_sdr is None:
            context_sdr = self.active
        basal_predictions = self.basal.compute(input_sdr=context_sdr)
        predictions       = basal_predictions

        if self.apical is not None:
            apical_predictions = self.apical.compute(input_sdr=apical_sdr)
            predictions        = np.logical_or(predictions, apical_predictions)

        # Inhibition cancels out predictions.  The technical term is
        # hyper-polarization.  Practically speaking, this is needed so that
        # inhibiting neurons can cause mini-columns to burst.
        if self.inhibition is not None:
            inhibited   = self.inhibition.compute(input_sdr=inhibition_sdr)
            predictions = np.logical_and(predictions, np.logical_not(inhibited))

        ########################################################################
        # PHASE 2:  Determine the currently active neurons.
        ########################################################################
        self.columns.assign(column_sdr)
        columns = self.columns.flat_index

        # Activate all neurons which are in a predictive state and in an active
        # column, unless they are inhibited by apical input.
        active_dense      = predictions[columns]
        col_num, neur_idx = np.nonzero(active_dense)
        # This gets the actual column index, undoes the effect of discarding the
        # inactive columns before the nonzero operation.  
        col_idx           = columns[col_num]
        predicted_active  = (col_idx, neur_idx)

        # If a column activates but was not predicted by any neuron segment,
        # then it bursts.  The bursting columns are the unpredicted columns.
        bursting_columns = np.setdiff1d(columns, col_idx)
        # All neurons in bursting columns activate.
        burst_col_idx  = np.repeat(bursting_columns, self.cells_per_column)
        burst_neur_idx = np.tile(np.arange(self.cells_per_column), len(bursting_columns))
        burst_active   = (burst_col_idx, burst_neur_idx)
        # Apply inhibition to the bursting mini-columns.
        if self.inhibition is not None:
            uninhibited_mask = np.logical_not(inhibited[burst_active])
            burst_active     = np.compress(uninhibited_mask, burst_active, axis=1)

        # TODO: Combined apical and basal predictions can cause L5 cells to
        # spontaneously activate.
        if False:
            volunteers = np.logical_and(self.basal_predictions, self.apical_predictions)
            volunteers = np.nonzero(volunteers.ravel())
            unique1d(volunteers, predicted_active+burst_active)

        self.active.index = tuple(np.concatenate([predicted_active, burst_active], axis=1))

        # Only tell the dendrite about active cells which are allowed to learn.
        bursting_learning = (
            bursting_columns,
            np.random.randint(0, self.cells_per_column, size=len(bursting_columns)))
        # TODO: This will NOT work for CONTEXT, TM ONLY.
        self.basal.input_sdr.assign(self.basal.active_sdr) # Only learn about the winner cells from last cycle.
        self.basal.active_sdr.index = tuple(np.concatenate([predicted_active, bursting_learning], axis=1))

        # Anomaly metric.
        self.anomaly      = np.array(burst_active).shape[1] / len(self.active)
        alpha             = self.anomaly_alpha
        self.mean_anomaly = (1-alpha)*self.mean_anomaly + alpha*self.anomaly

    def learn(self):
        """
        Learn about the previous to current timestep transition.
        """
        if self.reset_state:
            # Learning on the first timestep after a reset is not useful. The
            # issue is that waking up after a reset is inherently unpredictable.
            self.reset_state = False
            return

        # NOTE: All cells in a bursting mini-column will learn.  This includes
        # starting new segments if necessary.  This is different from Numenta's
        # TM which chooses one cell to learn on a bursting column.  If in fact
        # all newly created segments work correctly, then I may in fact be
        # destroying any chance of it learning a unique representation of the
        # anomalous sequence by assigning all cells to represent it.  I was
        # thinking that maybe this would work anyways because the presynapses
        # are chosen randomly, but now it's evolved an initial segment size of 19!
        # FIXED?

        # Use the SDRs which were given during the compute phase.
        # inputs = previous winner cells, active = current winner cells
        self.basal.learn(active_sdr=None)
        if self.apical is not None:
            self.apical.learn(active_sdr=self.active)
        if self.inhibition is not None:
            self.inhibition.learn(active_sdr=self.active)

    def statistics(self):
        stats  = 'Temporal Memory\n'
        stats += 'Predictive Segments ' + self.basal.statistics()
        if self.apical is not None:
            stats += 'Apical Segments ' + self.apical.statistics()
        if self.inhibition is not None:
            stats += 'Inhibition Segments ' + self.inhibition.statistics()

        stats += "Mean anomaly %g\n"%self.mean_anomaly
        stats += 'Activation statistics ' + self.active.statistics()

        return stats
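# A hedged usage sketch of the class above (the parameters, encoder, spatial
# pooler, and training data are illustrative): feed each element of a sequence
# through compute() and learn(), and call reset() between independent
# sequences so that state does not leak across them.
tm = TemporalMemory(tm_parameters, column_sdr=sp.columns)
for sequence in training_data:
    tm.reset()
    for element in sequence:
        enc.encode(element)
        sp.compute()
        tm.compute()
        tm.learn()
print(tm.statistics())
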
class SpatialPooler:
    """
    This class handles the mini-column structures and the feed forward 
    proximal inputs to each cortical mini-column.

    [CITE THE SP PAPER HERE]

    Topology: This implements local inhibition with topology by creating many
    small groups of mini-columns which are distributed across the input space.
    All of the mini-columns in a group are located at the same location in the
    input space, and they inhibit each other equally.   Each group of mini-
    columns is self contained; groups of mini-columns do not inhibit each other
    or interact.  Instead of creating a large spatial pooler with topology, this
    creates many small spatial poolers with topology between the spatial
    poolers.
    """
    def __init__(self,
        input_sdr,
        mini_columns,     # Integer,
        sparsity,
        potential_pool,
        permanence_inc,
        permanence_dec,
        permanence_thresh,
        segments            = 1,
        macro_columns       = (1,),
        init_dist           = (0, 0),
        boosting_alpha      = None,
        active_thresh       = 0,
        radii               = tuple()):
        """
        Argument mini_columns is an Integer, the number of mini-columns in each 
            macro-column.

        Argument macro_columns is a tuple of integers.  Dimensions of macro
            column array.  These are topological dimensions.  Macro columns are
            distributed across the input space in a uniform grid.

        Optional Argument radii defines the input topology.  Trailing extra
            input dimensions are treated as non-topological dimensions.

        Argument segments is an Integer, the number of proximal segments for each
            mini-column.

        Argument sparsity ...

        Argument potential_pool ...

        Optional Argument boosting_alpha is the small constant used by the
        moving exponential average which tracks each mini-column's activation
        frequency.  Default value is None, which disables boosting altogether.

        Argument permanence_inc ...
        Argument permanence_dec ...
        Argument permanence_thresh ...
        Argument init_dist is the (mean, std) of the initial permanence values,
                 which are drawn from a Gaussian random distribution.

        Argument active_thresh ...
        """
        assert(isinstance(input_sdr, SDR))
        assert(potential_pool > 1) # Number of synapses, not percent.
        self.mini_columns     = int(round(mini_columns))
        self.macro_columns    = tuple(int(round(dim)) for dim in macro_columns)
        self.radii            = radii
        self.segments         = int(round(segments))
        self.columns          = SDR(self.macro_columns + (self.mini_columns,),
            activation_frequency_alpha = boosting_alpha,
            average_overlap_alpha      = boosting_alpha,)
        self.sparsity         = sparsity
        self.active_thresh    = active_thresh
        self.potential_pool   = potential_pool
        self.age              = 0

        segment_shape = self.macro_columns + (self.mini_columns, self.segments)
        self.synapses = SynapseManager(
            input_sdr              = input_sdr,
            output_sdr             = SDR(segment_shape),
            radii                  = radii,
            init_dist              = init_dist,
            permanence_inc         = permanence_inc,
            permanence_dec         = permanence_dec,
            permanence_thresh      = permanence_thresh,
            initial_potential_pool = self.potential_pool,)

        if init_dist == (0, 0):
            # Nupic's SP init method
            # TODO: Make this a permanent part of the synapses class?  
            # Change init_dist argument to accept a string 'sp' ?
            for idx in range(self.synapses.output_sdr.size):
                pp = self.synapses.postsynaptic_permanences[idx].shape[0]
                connected   = np.random.random(pp) > .5
                permanences = np.random.random(pp)
                permanences[connected] *= 1 - self.synapses.permanence_thresh
                permanences[connected] += self.synapses.permanence_thresh
                permanences[np.logical_not(connected)] *= self.synapses.permanence_thresh
                self.synapses.postsynaptic_permanences[idx] = np.array(permanences, dtype=np.float32)
            self.synapses.rebuild_indexes()

        # Break ties randomly, in a constant unchanging manner.
        self.tie_breakers = np.random.uniform(0, .5, size=self.synapses.output_sdr.dimensions)

        self.boosting_alpha = boosting_alpha
        if boosting_alpha is not None:
            # Make a dedicated SDR to track segment activation frequencies for
            # boosting.
            self.boosting = SDR(self.synapses.output_sdr,
                                activation_frequency_alpha = boosting_alpha,
                                average_overlap_alpha      = boosting_alpha,)
            # Initialize to the target activation frequency/sparsity.
            self.boosting.activation_frequency.fill(self.sparsity / self.segments)

        self.reset()

    def reset(self):
        self.columns.zero()
        self.prev_updates = np.full(self.synapses.output_sdr.size, None)

    def compute(self, input_sdr=None, input_learning_sdr=None, learn=True):
        """
        """
        excitement, potential_excitement = self.synapses.compute(input_sdr=input_sdr)
        excitement = excitement + self.tie_breakers

        # Logarithmic Boosting Function.
        if self.boosting_alpha is not None:
            target_sparsity = self.sparsity / self.segments
            boost = np.log2(self.boosting.activation_frequency) / np.log2(target_sparsity)
            boost = np.nan_to_num(boost)
            excitement *= boost
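            # For intuition (the numbers are illustrative): with a target
            # sparsity of 0.02, a segment at its target frequency gets boost =
            # log2(0.02) / log2(0.02) = 1.  An under-active segment at 0.005
            # gets log2(0.005) / log2(0.02) ~= 1.35 (boosted above 1), while an
            # over-active segment at 0.08 gets ~= 0.65 (suppressed below 1).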

        # Divide excitement by the number of connected synapses.
        n_con_syns = self.synapses.postsynaptic_connected_count
        n_con_syns = n_con_syns.reshape(self.synapses.output_sdr.dimensions)
        percent_overlap = excitement / n_con_syns

        # Reduce the segment dimension to each mini-column's single most excited
        # segment.
        column_excitement = np.max(percent_overlap, axis=-1)

        # Stable SP and Grid Cells modify the excitement here.
        column_excitement = self._compute_hook(column_excitement)

        # Activate mini-columns.  First determine how many mini-columns to
        # activate in each macro-column.
        n_activate = max(1, int(round(self.mini_columns * self.sparsity)))

        # Activate the most excited mini-columns in each macro-column.
        k = self.mini_columns - n_activate
        mini_index = np.argpartition(column_excitement, k, axis=-1)[..., k:]

        # Convert activations from mini-column indices to macro-column indices.
        macro_index    = tuple(np.indices(mini_index.shape))[:-1]
        winner_columns = tuple(x.reshape(-1) for x in macro_index + (mini_index,))
        # Filter out columns with sub-threshold excitement.
        winner_excitement = np.max(excitement[winner_columns], axis=-1)
        winner_columns    = tuple(np.compress(winner_excitement >= self.active_thresh,
                                      winner_columns, axis=1))

        # Output the results into the columns sdr.
        self.columns.index = winner_columns

        if learn:
            seg_idx = np.argmax(excitement[winner_columns], axis=-1)
            learning_segments = winner_columns + (seg_idx,)
            self.prev_updates = self.synapses.learn(
                input_sdr    = input_learning_sdr,
                output_sdr   = learning_segments,
                prev_updates = self.prev_updates,)

            # Update the exponential moving average of each segment's activation frequency.
            if self.boosting_alpha is not None:
                self.boosting.assign(learning_segments)

            self.age += 1

        return self.columns

    def _compute_hook(self, x):
        """Subclasses override this method."""
        return x

    def statistics(self, _class_name='Spatial Pooler'):
        stats = _class_name + ' '
        stats += self.synapses.statistics()
        stats += 'Columns ' + self.columns.statistics()

        if self.boosting_alpha is not None:
            if self.segments > 1:
                stats  += 'Segments ' + self.boosting.statistics()
            af         = self.boosting.activation_frequency
            target     = self.sparsity / self.segments
            boost_min  = np.log2(np.min(af))  / np.log2(target)
            boost_mean = np.log2(np.mean(af)) / np.log2(target)
            boost_max  = np.log2(np.max(af))  / np.log2(target)
            stats += '\tLogarithmic Boosting Multiplier min/mean/max  {:-.04g}% / {:-.04g}% / {:-.04g}%\n'.format(
                    boost_min   * 100,
                    boost_mean  * 100,
                    boost_max   * 100,)
        return stats
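# A hedged usage sketch of the SpatialPooler above (the encoder, the input
# datum, and the parameter values are illustrative): wire it to an input SDR,
# call compute() once per timestep, and read the winning mini-columns from the
# returned SDR.
sp = SpatialPooler(
    input_sdr         = enc.output_sdr,
    mini_columns      = 2000,
    sparsity          = 0.02,
    potential_pool    = 100,
    permanence_inc    = 0.04,
    permanence_dec    = 0.01,
    permanence_thresh = 0.40,
    boosting_alpha    = 0.001,)
enc.encode(datum)
active_columns = sp.compute(learn=True)
print(sp.statistics())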
Example #19
def main(parameters=default_parameters, argv=None, verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--episode_length',
        type=int,
        default=100,
    )
    parser.add_argument(
        '--train_episodes',
        type=int,
        default=100 * len(patterns),
    )
    parser.add_argument(
        '--test_episodes',
        type=int,
        default=20 * len(patterns),
    )
    parser.add_argument(
        '--environment_size',
        type=int,
        default=40,
    )
    parser.add_argument('--move_env', action='store_true')
    parser.add_argument('--show_pattern', action='store_true')
    args = parser.parse_args(args=argv)

    # PARAMETER OVERRIDES!
    parameters['grid_cells'] = default_parameters['grid_cells']

    if verbose:
        import pprint
        print("Parameters = ", end='')
        pprint.pprint(parameters)
        print("Episode Length", args.episode_length)

    env = Environment(size=args.environment_size)
    gc = GridCellEncoder(**parameters['grid_cells'])

    trajectory = TemporalMemory(column_sdr=gc.grid_cells,
                                context_sdr=None,
                                anomaly_alpha=1. / 1000,
                                predicted_boost=1,
                                segments_per_cell=20,
                                **parameters['trajectory'])
    trajectory_sdrc = SDRClassifier(steps=[0])

    motion = StableSpatialPooler(input_sdr=SDR(trajectory.active),
                                 **parameters['motion'])
    motion_sdrc = SDRClassifier(steps=[0])

    def reset():
        env.reset()
        gc.reset()
        trajectory.reset()
        motion.reset()

    env_offset = np.zeros(2)

    def compute(learn=True):
        gc_sdr = gc.encode(env.position + env_offset)

        trajectory.compute(
            column_sdr=gc_sdr,
            learn=learn,
        )

        motion.compute(
            input_sdr=trajectory.active,
            input_learning_sdr=trajectory.learning,
            learn=learn,
        )

    # Train
    if verbose:
        print("Training for %d episodes ..." % args.train_episodes)
        start_time = time.time()
    for session in range(args.train_episodes):
        reset()
        pattern = random.randrange(len(patterns))
        pattern_func = patterns[pattern]
        for step in range(args.episode_length):
            angle = pattern_func(env.angle * 180 / math.pi,
                                 motion.age) * math.pi / 180
            env.move(angle)
            if env.collision:
                reset()
                continue
            compute()
            trajectory_sdrc.compute(trajectory.age,
                                    trajectory.learning.flat_index,
                                    classification={
                                        "bucketIdx": pattern,
                                        "actValue": pattern
                                    },
                                    learn=True,
                                    infer=False)
            motion_sdrc.compute(motion.age,
                                motion.columns.flat_index,
                                classification={
                                    "bucketIdx": pattern,
                                    "actValue": pattern
                                },
                                learn=True,
                                infer=False)
            if verbose and motion.age % 10000 == 0:
                print("Cycle %d" % motion.age)
        if args.show_pattern:
            env.plot_course()

    if verbose:
        train_time = time.time() - start_time
        start_time = time.time()
        print("Elapsed time (training): %d seconds." % int(round(train_time)))
        print("")
        print("Trajectory", trajectory.statistics())
        print("Motion", motion.statistics())
        print("")

    # Test
    if verbose:
        print("Testing for %d episodes ..." % args.test_episodes)
    if args.move_env:
        env_offset = np.array([9 * env.size, 9 * env.size])
        if verbose:
            print("Moved to new environment.")
    trajectory_accuracy = 0
    motion_accuracy = 0
    sample_size = 0
    trajectory_confusion = np.zeros((len(patterns), len(patterns)))
    motion_confusion = np.zeros((len(patterns), len(patterns)))
    for episode in range(args.test_episodes):
        reset()
        pattern = random.randrange(len(patterns))
        pattern_func = patterns[pattern]
        for step in range(args.episode_length):
            angle = pattern_func(env.angle * 180 / math.pi,
                                 motion.age) * math.pi / 180
            env.move(angle)
            if env.collision:
                reset()
                continue
            compute(learn=True)
            trajectory_inference = trajectory_sdrc.infer(
                trajectory.learning.flat_index, None)[0]
            if pattern == np.argmax(trajectory_inference):
                trajectory_accuracy += 1
            trajectory_confusion[pattern][np.argmax(trajectory_inference)] += 1
            motion_inference = motion_sdrc.infer(motion.columns.flat_index,
                                                 None)[0]
            if pattern == np.argmax(motion_inference):
                motion_accuracy += 1
            motion_confusion[pattern][np.argmax(motion_inference)] += 1
            sample_size += 1
    trajectory_accuracy /= sample_size
    motion_accuracy /= sample_size
    if verbose:
        print("Trajectory Accuracy %g, %d catagories." %
              (trajectory_accuracy, len(patterns)))
        print("Motion Accuracy     %g" % motion_accuracy)

    # Display Confusion Matrices
    if verbose:
        conf_matrices = (
            trajectory_confusion,
            motion_confusion,
        )
        conf_titles = (
            'Trajectory',
            'Motion',
        )
        #
        plt.figure("Pattern Recognition Confusion")
        for subplot_idx, matrix_title in enumerate(
                zip(conf_matrices, conf_titles)):
            matrix, title = matrix_title
            plt.subplot(1, len(conf_matrices), subplot_idx + 1)
            plt.title(title + " Confusion")
            matrix_sum = np.sum(matrix, axis=1)
            matrix_sum[matrix_sum == 0] = 1
            matrix = (matrix.T / matrix_sum).T
            plt.imshow(matrix, interpolation='nearest')
            plt.xlabel('Prediction')
            plt.ylabel('Label')

    if synapses_debug:
        gc.synapses.check_data_integrity()
        trajectory.synapses.check_data_integrity()
        motion.synapses.check_data_integrity()
        print("Synapse data structure integrity is OK.")

    if verbose:
        test_time = time.time() - start_time
        print("Elapsed time (testing): %d seconds." % int(round(test_time)))
        plt.show()
    return motion_accuracy
Example #20
    def evaluate(self):
        data = Dataset('datasets/textures/')
        max_dist = -32
        timer = genetics.speed_fitness(threshold=60, maximum=2 * 60)
        sensor = htm.EyeSensor(self.eye)
        sp = htm.SpatialPooler(self.sp,
                               input_sdr=sensor.optic_sdr,
                               column_sdr=SDR(self.columns),
                               radii=self.radii,
                               multisegment_experiment=None)
        classifier = htm.SDR_Classifier(self.sdrc,
                                        sp.columns, (len(data.names), ),
                                        output_type='pdf')
        baseline = htm.RandomOutputClassifier((len(data.names), ))

        time_per_image = int(round(self.time_per_image))
        num_images = int(round(self.image_cycles))
        if self.debug:
            sampler = htm.EyeSensorSampler(sensor, num_images, 30)
            memory_perf = 0  # Debug mode takes extra memory, so the memory benchmark is disabled during debug.
        sp_score = 0
        baseline_score = 0
        # Outer loop through images.
        for img_num in range(num_images):
            # Setup for a new image
            data.random_image()
            sensor.new_image(data.current_image)
            # Determine where the eye will look on the image.
            positions = data.points_near_label(max_dist=max_dist,
                                               number=time_per_image)

            # Inner loop through samples from each image.
            for sample_point in positions:
                # Classify the image.
                sensor.randomize_view()  # Get a new orientation and scale.
                sensor.position = sample_point
                sp.compute(input_sdr=sensor.view())
                sp_prediction = classifier.predict(sp.columns)
                baseline_prediction = baseline.predict()

                # Compare results to labels.
                label_sample_points = sensor.input_space_sample_points(20)
                labels = data.sample_labels(label_sample_points)
                sp_score += data.compare_label_samples(sp_prediction, labels)
                baseline_score += data.compare_label_samples(
                    baseline_prediction, labels)
                # Learn.
                sp.learn()
                classifier.train(sp.columns.index, labels)
                baseline.train(labels)

            # Sample memory usage.
            if img_num == min(10, num_images - 1) and not self.debug:
                # I want each process to take no more than 20% of my RAM or 2.5
                # GB.  This should let me run 4 processes + linux + firefox.
                memory_perf = genetics.memory_fitness(2e9, 4e9)

        sp_score /= sp.age
        baseline_score /= sp.age
        time_perf = timer.done()  # Stop the timer before debug opens windows and returns control to user.
        if self.debug:
            print(sp.statistics())
            sampler.view_samples()

        return {
            'baseline': baseline_score,
            'score': sp_score,
            'time': time_perf,
            'memory': memory_perf,
        }
Example #21
 def __init__(self, input_space):
     self.output = SDR(tuple(input_space) + (2, ))
Example #22
 def __init__(self, resolution, size, sparsity):
     self.resolution = resolution
     self.output_sdr = SDR((size,))
     self.sparsity   = sparsity
Example #23
 def __init__(self, size, sparsity):
     self.output_sdr   = SDR((size,))
     self.sparsity     = sparsity
Example #24
    def __init__(self, 
        parameters,
        column_sdr,
        apical_sdr=None,
        inhibition_sdr=None,
        context_sdr=None,
        ):
        """
        Argument parameters is an instance of TemporalMemoryParameters
        Argument column_sdr ...
        """
        assert(isinstance(parameters, TemporalMemoryParameters))
        self.args = args         = parameters
        assert(isinstance(column_sdr, SDR))
        self.columns             = column_sdr
        self.cells_per_column    = int(round(args.cells_per_column))
        if self.cells_per_column < 1:
            raise ValueError("Cannot create TemporalMemory with cells_per_column < 1.")
        self.segments_per_cell   = int(round(args.segments_per_cell))
        self.active              = SDR((self.columns.size, self.cells_per_column),
                                        activation_frequency_alpha = 1/1000,
                                        average_overlap_alpha      = 1/1000,)
        self.anomaly_alpha       = 1/1000
        self.mean_anomaly        = 0

        self.basal = Dendrite(
            input_sdr            = SDR(context_sdr if context_sdr is not None else self.active),
            active_sdr           = SDR(self.active),
            segments_per_cell    = args.segments_per_cell,
            synapses_per_segment = args.synapses_per_segment,
            initial_segment_size = args.initial_segment_size,
            add_synapses         = args.add_synapses,
            learning_threshold   = args.learning_threshold,
            predictive_threshold = args.predictive_threshold,
            permanence_inc       = args.permanence_inc,
            permanence_dec       = args.permanence_dec,
            permanence_thresh    = args.permanence_thresh,
            mispredict_dec       = args.mispredict_dec,)

        if apical_sdr is None:
            self.apical = None
        else:
            assert(isinstance(apical_sdr, SDR))
            self.apical = Dendrite(
                input_sdr            = apical_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                mispredict_dec       = args.mispredict_dec,)

        if inhibition_sdr is None:
            self.inhibition = None
        else:
            assert(isinstance(inhibition_sdr, SDR))
            self.inhibition = Dendrite(
                input_sdr            = inhibition_sdr,
                active_sdr           = self.active,
                segments_per_cell    = args.segments_per_cell,
                synapses_per_segment = args.synapses_per_segment,
                initial_segment_size = args.initial_segment_size,
                add_synapses         = args.add_synapses,
                learning_threshold   = args.learning_threshold,
                predictive_threshold = args.predictive_threshold,
                permanence_inc       = args.permanence_inc,
                permanence_dec       = args.permanence_dec,
                permanence_thresh    = args.permanence_thresh,
                mispredict_dec       = 0,) # Mispredict penalty disabled: an inhibited segment on an active cell is not penalized here, though arguably it should be.

        self.reset()
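
# Illustrative only: wiring this TemporalMemory on top of a SpatialPooler's
# column SDR.  The parameter object and the SDR size below are made up; the
# field names follow the TemporalMemoryParameters attributes used above.
columns = SDR((2048,))
tm      = TemporalMemory(tm_parameters, column_sdr=columns)  # context defaults to tm.active
tm_fb   = TemporalMemory(tm_parameters,
                         column_sdr=columns,
                         apical_sdr=another_layer.active,     # hypothetical feedback source
                         inhibition_sdr=None)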
Example #25
0
    def evaluate(self, debug):
        # Setup test and train datasets and perform lexical analysis.  First get
        # full text of training dataset into a string.
        if self.dataset == 'gutenberg':
            text_stream = read_corpus(debug=debug)
        elif self.dataset == 'states':
            text_stream = state_name_reader(debug=debug)
        train_dataset = []
        for i in range(self.train_time):
            char = next(text_stream)
            train_dataset.append(char)
        train_dataset = ''.join(train_dataset)
        # Search for words in the dataset.  Store the words as keys in a
        # histogram of word occurrences.
        word_regex = r"\w(\w|')*"
        word_iter = re.finditer(word_regex, train_dataset)
        word_hist = {}
        # train_word_spans stores where each word is located, list of pairs of
        # (start, end) index into train_dataset.
        train_word_spans = []
        for match in word_iter:
            span = match.span()  # Returns pair of (start-index, end-index)
            word = train_dataset[span[0]:span[1]]
            if word not in word_hist:
                word_hist[word] = 0
            word_hist[word] += 1
            train_word_spans.append(span)
        # Sort words by the number of times they occur in the train_dataset.
        # Break ties randomly.
        word_list = list(word_hist.keys())
        word_freq = [
            -(word_hist[word] + random.random()) for word in word_list
        ]
        word_rank = np.take(word_list, np.argsort(word_freq))
        # Get word_list and word_freq out of memory, from here on use word_rank & word_hist.
        word_list = None
        word_freq = None
        # Select some common words to test vocabulary with.
        test_words = word_rank[:self.test_words].tolist()
        # Assign each vocabulary word an integer identifier.  A test word's
        # identifier doubles as its index into the test_words list.
        if False:
            test_words.sort()  # Make the index easier for humans to use.
        # The first entry is special because when the SDRC cannot identify the
        # word at all it outputs all zeros, and np.argmax() then returns index 0
        # as the best prediction.
        test_words.insert(0, "WORD_UNKNOWN")
        word_hist[test_words[0]] = 0
        test_word_id_lookup = {
            word: index
            for index, word in enumerate(test_words)
        }
        # Search for examples of the vocabulary words used in sentences.  First
        # read a large amount of sample text.  Only the relevant sections of the
        # test_dataset are used; those ranges of text are stored in the variable
        # test_sentance_spans.
        test_dataset = []
        for i in range(int(self.test_time)):
            char = next(text_stream)
            test_dataset.append(char)
        test_dataset = ''.join(test_dataset)
        word_iter = re.finditer(word_regex, test_dataset)
        # The following two lists hold pairs of (start, end) slice indexes into
        # test_dataset.  They are NOT the same length because overlapping
        # test_sentance_spans are merged into a single example containing
        # several of the vocabulary words.
        test_word_spans = []  # Spans of just the test vocabulary words.
        # Spans of test words with preceding context included.
        test_sentance_spans = []
        test_hist = {word: 0 for word in test_words}
        for match in word_iter:
            span = match.span()  # Returns pair of (start-index, end-index)
            start, end = span
            word = test_dataset[start:end]
            if word not in test_word_id_lookup.keys():
                continue
            # Ignore test vocabulary words after they've been seen many times.
            if test_hist[word] >= self.test_sample:
                continue
            test_hist[word] += 1
            test_word_spans.append(span)
            context_start = max(0, start - self.min_context)
            if test_sentance_spans and test_sentance_spans[-1][1] >= context_start:
                # Extend the last test sentence and test this additional word using it.
                context_start = test_sentance_spans[-1][0]
                test_sentance_spans[-1] = (context_start, end)
            else:
                # Add a new test sentence.
                test_sentance_spans.append((context_start, end))
        len_test_dataset = sum(e - s for s, e in test_sentance_spans)
        if debug:
            print('Training dataset size:', self.train_time, 'characters,',
                  len(train_word_spans), 'words,', len(word_hist),
                  'unique words.')
            print('Test vocabulary size:', len(test_words), 'words.')
            min_freq = min(word_hist[word] for word in test_words[1:])
            max_freq = max(word_hist[word] for word in test_words[1:])
            print('Test vocabulary samples:',
                  ', '.join(random.sample(test_words[1:], 6)) + '.')
            print(
                'Test vocabulary min & max occurrences in training dataset: %d - %d.'
                % (min_freq, max_freq))
            test_hist_values = list(test_hist[word] for word in test_words[1:])
            min_freq = min(test_hist_values)
            avg_freq = np.mean(test_hist_values)
            max_freq = max(test_hist_values)
            print(
                'Test vocabulary min/mean/max occurrences in testing dataset: %d / %.1f / %d.'
                % (min_freq, avg_freq, max_freq))
            print('Test dataset size:', len_test_dataset, 'characters,',
                  len(test_word_spans), 'vocabulary words.')
            print('Test sentence average length: %.1f characters.' %
                  (len_test_dataset / len(test_sentance_spans)))
            if self.list_test_words:
                print('Index) Word, Train samples, Test samples.')
                if False:
                    # Sort by number of samples in dataset.
                    # TODO: This would be more useful if it sorted the actual test_words list.
                    test_freq = [-test_hist[word] for word in test_words]
                    test_rank = np.take(test_words, np.argsort(test_freq))
                    ordered_words = test_rank
                else:
                    # Sort by index.
                    ordered_words = test_words
                # Print index of test words.
                for word in ordered_words:
                    index = test_word_id_lookup[word]
                    train_samples = word_hist[word]
                    test_samples = test_hist[word]
                    fmt_str = '%3d) %-15s\t%2d, %2d'
                    print(fmt_str % (index, word, train_samples, test_samples))
                if True:
                    # Look at some test sentences.
                    sample_spans = random.sample(
                        test_sentance_spans, min(10, len(test_sentance_spans)))
                    sample_sentances = [
                        test_dataset[s[0]:s[1]] for s in sample_spans
                    ]
                    print('Sample test sentences:\n\t',
                          '\n\n\t'.join(sample_sentances))
            print()
        # After seeing all of the words in either dataset, wait forever for the
        # next word, or until the dataset is finished playing.  This case is
        # needed when the dataset ends with white space.
        train_word_spans.append((float('inf'), float('inf')))
        test_word_spans.append((float('inf'), float('inf')))
        # if len(test_words) != self.test_words + 1:
        #     raise ValueError('Could not find %d test words'%self.test_words)

        # Setup AI.
        timer = genetics.speed_fitness(self.time_limit / 3, self.time_limit)
        enc = encoders.EnumEncoder(self.enc_bits,
                                   self.enc_sparsity,
                                   diag=False)

        # Make the context SDR which both L4 and L23 use to predict the future.
        context_size = self.l4.cells + self.l23.cells
        context = SDR((context_size, ))
        l4 = unified.Unified(
            self.l4,
            input_sdr=enc.output_sdr,
            context_sdr=context,
            macro_columns=(1, ),
            radii=self.l4_radii,
        )
        l23 = unified.Unified(
            self.l23,
            input_sdr=l4.active,
            context_sdr=context,
            macro_columns=(1, ),
            radii=self.l23_radii,
        )

        l4_sdrc = classifiers.SDR_Classifier(self.sdrc, l4.active,
                                             (len(test_words), ), 'index')
        l23_sdrc = classifiers.SDR_Classifier(self.sdrc, l23.active,
                                              (len(test_words), ), 'index')

        def reset():
            l4.reset()
            l23.reset()

        def compute(learn=True):
            context.assign_flat_concatenate([l4.active, l23.active])
            if self.l4_only:
                # Test L4 in isolation, Disable feedback from L2/3 to L4.
                zeros_like_l23 = SDR(l23.active)
                zeros_like_l23.zero()
                context.assign_flat_concatenate([l4.active, zeros_like_l23])
            l4.compute()
            l23.compute()
            if learn:
                l4.learn()
                l23.learn()

        if debug:
            print('SDR DEBUG:', sdr.debug)
            if self.l4_only:
                print("L4 Isolated, Disabled L2/3 -> L4 Feedback.")
            if False:
                print('L4', l4.statistics())
                print('L23', l23.statistics())

        # Train by reading books.
        if self.train_no_stability:
            self.l23.min_stability = 0
            assert (debug)
            print('L23 min stability set to', self.l23.min_stability)
        if debug:
            print('Training ...')
        word = None  # Current word or None, AI trains to predict this variable.
        word_index = None  # Index of current word in test_words, or None if it's not a test word.
        word_span_index = 0  # Index of current word in train_dataset
        reset()
        for step in range(self.train_time):
            # Determine the current word.
            start, end = train_word_spans[word_span_index]
            if step == start:
                word = train_dataset[start:end]
                try:
                    word_index = (test_word_id_lookup[word], )
                except KeyError:  # Word is not in vocabulary test words, SDRC should ignore it.
                    word_index = None
            if step == end:
                word = None
                word_index = None
                word_span_index += 1
            # Process the next letter of the book.
            char = train_dataset[step]
            enc.encode(char)
            compute(learn=True)
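            # Train the classifiers only on the final letter of each vocabulary word.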
            if word_index is not None and step == end - 1:
                l4_sdrc.train(input_sdr=None, out=word_index)
                l23_sdrc.train(input_sdr=None, out=word_index)

        # Test.  Measure:
        # 1) Stability,
        # 2) Anomaly,
        # 3) Word recognition accuracy and cross-category confusion.
        real_min_stab = l23.args.min_stability
        if self.test_no_stability:
            l23.args.min_stability = 0
        if debug:
            print('Testing ...')
            if l23.args.min_stability != real_min_stab:
                print('L23 min stability changed to', l23.args.min_stability)
            else:
                print('L23 min stability remains at', l23.args.min_stability)
        l23_stability = 0.  # Accumulates the L2/3 stability.
        l4_anomaly = 0.  # Accumulates the L4 anomaly.
        l23_anomaly = 0.  # Accumulates the L2/3 anomaly.
        l4_accuracy = 0.  # Accumulates the L4 word classification accuracy.
        l23_accuracy = 0.  # Accumulates the L2/3 word classification accuracy.
        max_accuracy = 0.  # Number of samples accumulated in variable 'l23_accuracy'.
        l4_end_accuracy = 0.  # Like 'l4_accuracy' but only measured on the final letter of the word.
        l23_end_accuracy = 0.  # Like 'l23_accuracy' but only measured on the final letter of the word.
        max_end_accuracy = 0.  # Number of samples accumulated in variable 'l23_end_accuracy'.
        l23_confusion = np.zeros((len(test_words), len(test_words)))
        l4_confusion = np.zeros((len(test_words), len(test_words)))
        next_span_index = 0  # Index of current word in test_word_spans (or next word if not currently on a word).
        for sentance_start, sentance_end in test_sentance_spans:
            reset()
            word_index = None  # Index of current word, or None.
            for index in range(sentance_start, sentance_end):
                # Determine the current word.  Allow words to immediately follow
                # each other, in case they're separated by a reset and zero
                # characters of context.
                word_start, word_end = test_word_spans[next_span_index]
                if index >= word_end:
                    word_index = None
                    next_span_index += 1
                word_start, word_end = test_word_spans[next_span_index]
                if index >= word_start:
                    word = test_dataset[word_start:word_end]
                    word_index = test_word_id_lookup[word]
                # Process the current character.
                char = test_dataset[index]
                enc.encode(char)
                compute(learn=False)
                # Measure.
                if real_min_stab > 0:
                    l23_stability += min(l23.stability,
                                         real_min_stab) / real_min_stab
                else:
                    l23_stability += 1
                l4_anomaly += l4.anomaly
                l23_anomaly += l23.anomaly
                if word_index is not None:
                    l4_prediction = l4_sdrc.predict()
                    l23_prediction = l23_sdrc.predict()
                    l4_best_guess = np.argmax(l4_prediction)
                    l23_best_guess = np.argmax(l23_prediction)
                    if l23_best_guess == word_index:
                        l23_accuracy += 1
                        if index == word_end - 1:
                            l23_end_accuracy += 1
                    if l4_best_guess == word_index:
                        l4_accuracy += 1
                        if index == word_end - 1:
                            l4_end_accuracy += 1
                    max_accuracy += 1
                    if index == word_end - 1:
                        max_end_accuracy += 1
                    # Update confusion matrices.  Prediction is a PDF; its sum must equal 1.
                    if True:
                        l23_confusion[word_index, l23_best_guess] += 1
                        if index == word_end - 1:
                            l4_confusion[word_index, l4_best_guess] += 1
                    else:
                        l23_prediction_sum = np.sum(l23_prediction)
                        if l23_prediction_sum != 0.:
                            l23_prediction /= l23_prediction_sum
                            l23_confusion[word_index, :] += l23_prediction
                        l4_prediction_sum = np.sum(l4_prediction)
                        if l4_prediction_sum != 0.:
                            l4_prediction /= l4_prediction_sum
                            l4_confusion[word_index, :] += l4_prediction
        # Divide all accumulators by the number of samples added to them.
        l23_stability /= len_test_dataset
        l23_accuracy /= max_accuracy
        l23_end_accuracy /= max_end_accuracy
        l23_anomaly /= len_test_dataset
        l4_accuracy /= max_accuracy
        l4_end_accuracy /= max_end_accuracy
        l4_anomaly /= len_test_dataset
        for label_idx, label in enumerate(test_words):
            # Divide by the number of PDF's which have accumulated at each
            # label, each PDF has sum of 1.
            l23_num_samples = np.sum(l23_confusion[label_idx, :])
            if l23_num_samples != 0:
                l23_confusion[label_idx, :] /= l23_num_samples
            l4_num_samples = np.sum(l4_confusion[label_idx, :])
            if l4_num_samples != 0:
                l4_confusion[label_idx, :] /= l4_num_samples

        def plot_sentance_stability(string):
            plt.figure('Stability')
            plt.ylim(-0.01, 1.01)
            plt.xlim(-0.5, len(string) - 0.5)
            plt.xlabel('Time')
            plt.ylabel('L2/3 Overlap')
            plt.axhline(real_min_stab)
            stability = []
            confidence = []
            anomaly = []
            reset()
            for step, char in enumerate(string):
                enc.encode(char)
                compute(learn=False)
                stability.append(l23.stability)
                anomaly.append(l23.anomaly)
                prediction = l23_sdrc.predict()
                best_guess = test_words[np.argmax(prediction)]
                confidence.append(np.max(prediction) / np.sum(prediction))
                #
                plt.axvline(step + .5, color='grey', alpha=0.25)
                plt.text(step - 0.25, .98, char)
                if char.isalpha():
                    plt.text(step - 0.25,
                             0.95,
                             best_guess,
                             rotation='vertical')
                # TODO: Determine which steps it learns on by looking at the dataset.
                elif step - 1 >= 0 and string[step - 1].isalpha():
                    plt.axvspan(step - 1.5,
                                step - .5,
                                color='yellow',
                                alpha=0.5)
            plt.axvspan(step - .5, step + .5, color='yellow', alpha=0.5)
            plt.plot(
                np.arange(len(string)),
                stability,
                'r-',
            )
            # np.arange(len(string)), confidence,   'b-',)
            # np.arange(len(string)), anomaly,   'b-',)
            # plt.title('L2/3 Overlap is Red,  Confidence is Blue')
            # plt.title('L2/3 Overlap is Red,  Anomaly is Blue')
            plt.title((
                'Top: Input Letter, Middle: Best Guess,\n' +
                'Bottom Graph: Red Line L2/3 Stability, Blue Line: Target Stability, Learning Enabled on Yellow Steps.'
            ))

        # Report.
        fitness = {
            'L23_stability': l23_stability,
            'L23_accuracy': l23_accuracy,
            'L23_end_accuracy': l23_end_accuracy,
            'L23_anomaly': l23_anomaly,
            'L4_accuracy': l4_accuracy,
            'L4_end_accuracy': l4_end_accuracy,
            'L4_anomaly': l4_anomaly,
            'speed': timer.done(),
            'memory': genetics.memory_fitness(2e9, 3e9),
        }
        if debug:
            print()
            print('L4', l4.statistics())
            print('L23', l23.statistics())

            span = random.choice(test_sentance_spans)
            sentance = test_dataset[span[0]:span[1]]
            sentance = sentance[-100:]  # Don't show too much text in one figure.
            if self.show_typos:
                sentance = ' '.join(
                    [mutate_word(w) for w in sentance.split(' ')])
            plot_sentance_stability(sentance)

            plt.figure('L23 Confusion Matrix')
            plt.imshow(l23_confusion, interpolation='nearest')
            plt.xlabel('Prediction')
            plt.ylabel('Label')
            plt.figure('L4 Confusion Matrix')
            plt.imshow(l4_confusion, interpolation='nearest')
            plt.xlabel('Prediction')
            plt.ylabel('Label')

            plt.show()
        return fitness
Example #26
0
class Cerebrum:
    """
    """
    def __init__(self, cerebrum_parameters, region_parameters, input_sdrs):
        self.cerebrum_parameters = cerebrum_parameters
        self.region_parameters   = tuple(region_parameters)
        self.inputs              = tuple(input_sdrs)
        self.age                 = 0
        assert(isinstance(cerebrum_parameters, CerebrumParameters))
        assert(all(isinstance(rgn, CorticalRegionParameters) for rgn in self.region_parameters))
        assert(len(region_parameters) == len(self.inputs))
        assert(all(isinstance(inp, SDR) for inp in self.inputs))

        # The size of the cortex needs to be known before it can be constructed.
        context_size     = 0
        self.apical_sdrs = []
        for rgn_args in self.region_parameters:
            num_cols  = np.prod([int(round(dim)) for dim in rgn_args.out_cols])
            cells_per = int(round(cerebrum_parameters.out_tm.cells_per_column))
            context_size += num_cols * cells_per * 2
            L5_dims      = (num_cols * cells_per,)
            self.apical_sdrs.append((SDR(L5_dims), SDR(L5_dims)))
        self.L23_activity  = SDR((context_size // 2,))
        self.L5_activity   = SDR((context_size // 2,))
        self.context_sdr   = SDR((context_size,))

        # Construct the Basal Ganglia
        self.basal_ganglia = BasalGanglia(cerebrum_parameters.bg,
                                          input_sdr  = self.context_sdr,
                                          output_sdr = self.L5_activity,)

        # Construct the cortex.
        self.regions = []
        for rgn_args, inp, apical in zip(self.region_parameters, input_sdrs, self.apical_sdrs):
            rgn = CorticalRegion(cerebrum_parameters, rgn_args,
                                 input_sdr      = inp,
                                 context_sdr    = self.context_sdr,
                                 apical_sdr     = self.basal_ganglia.d1.active,
                                 inhibition_sdr = self.basal_ganglia.d2.active,)
            self.regions.append(rgn)

        # Construct the motor controls.
        pass

    def reset(self):
        self.basal_ganglia.reset()
        for rgn in self.regions:
            rgn.reset()

    def compute(self, reward, learn=True):
        """
        Runs a single cycle for a whole network of cortical regions.
        Argument reward is the scalar reinforcement signal passed to the basal ganglia.
        Optional argument learn ... default is True.
        """
        for rgn in self.regions:
            rgn.compute()

        self.L5_activity.assign_flat_concatenate(rgn.L5_tm.active for rgn in self.regions)
        self.L23_activity.assign_flat_concatenate(rgn.L23_tm.active for rgn in self.regions)
        self.context_sdr.assign_flat_concatenate([self.L5_activity, self.L23_activity])

        if not learn:
            reward = None
        self.basal_ganglia.compute(reward)

        if learn:
            for rgn in self.regions:
                rgn.learn(self.basal_ganglia)

        # Motor controls.
        pass

        if learn:
            self.age += 1

    def statistics(self):
        stats = ''
        for idx, rgn in enumerate(self.regions):
            stats += 'Region {}\n'.format(idx+1)
            stats += rgn.statistics() + '\n'
        # stats += self.basal_ganglia.statistics()
        return stats
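
# Illustrative usage sketch; the parameter objects, sensor SDRs, and reward
# source below are hypothetical.  Encoders would write active bits into the
# sensor SDRs before each call to compute().
sensor_sdrs = [SDR((1024,)), SDR((1024,))]
brain = Cerebrum(cerebrum_params, [region_params_a, region_params_b], sensor_sdrs)
brain.reset()
for step in range(1000):
    # ... encoders update each SDR in sensor_sdrs here ...
    reward = get_reward(step)  # hypothetical scalar reinforcement signal
    brain.compute(reward, learn=True)
print(brain.statistics())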
Example #27
0
def main(parameters=default_parameters, argv=None, verbose=True):
    # Setup
    num_objects = 100
    object_sizes = range(20, 40 + 1)
    train_iterations = 100
    test_iterations = 5
    steps_per_object = range(3, 17 + 1)
    inputs, objects = object_dataset(num_objects, object_sizes)

    enc = EnumEncoder(2400, 0.02)
    enc.output_sdr = SDR(
        enc.output_sdr,
        activation_frequency_alpha=parameters['boosting_alpha'],
        average_overlap_alpha=parameters['boosting_alpha'],
    )

    sp = StableSpatialPooler(input_sdr=enc.output_sdr,
                             macro_columns=(1, ),
                             **parameters)
    sdrc = SDRClassifier(steps=[0])

    def measure_catagories():
        # Compute every sensation for every object.
        objects_columns = []
        for obj in objects:
            objects_columns.append([])
            for sensation in obj:
                sp.reset()
                enc.encode(sensation)
                sp.compute(learn=False)
                objects_columns[-1].append(SDR(sp.columns))
        sp.reset()
        return objects_columns

    if verbose:
        print("Num-Inputs  ", len(set(itertools.chain.from_iterable(objects))))
        print('Num-Objects ', num_objects)
        print("Object-Sizes", object_sizes)
        print("Steps/Object", steps_per_object)
        print(sp.statistics())
        objects_columns = measure_catagories()
        measure_inter_intra_overlap(objects_columns, verbose)
        print("")

        # TRAIN
        train_time = train_iterations * num_objects * np.mean(steps_per_object)
        print('TRAINING for ~%d Cycles (%d dataset iterations) ...' %
              (train_time, train_iterations))
        print("")

    sp.reset()
    t = 0
    for iteration in range(train_iterations):
        object_order = list(range(num_objects))
        random.shuffle(object_order)
        for object_id in object_order:
            for step in range(random.choice(steps_per_object)):
                sensation = random.choice(objects[object_id])
                enc.encode(sensation)
                sp.compute()
                try:
                    sdrc.compute(t,
                                 sp.columns.flat_index,
                                 classification={
                                     "bucketIdx": object_id,
                                     "actValue": object_id,
                                 },
                                 learn=True,
                                 infer=False)
                except ValueError:
                    print("Warning: len(active) = %d." % (len(sp.columns)))
                t += 1

    if verbose:
        print("TESTING ...")
        print("")
        print('Encoder Output', enc.output_sdr.statistics())
        print(sp.statistics())

    objects_columns = measure_catagories()
    _, __, stability_metric = measure_inter_intra_overlap(
        objects_columns, verbose)

    # Measure classification accuracy.  This test consists of looking at every
    # object a few times and then classifying it.  The AI is evaluated on every
    # cycle.
    score = 0
    max_score = 0
    sp.reset()
    if verbose:
        print("")
        print("Test length: %d dataset iterations." % (test_iterations))
    test_data = list(range(num_objects))
    for iteration in range(test_iterations):
        random.shuffle(test_data)
        for object_id in test_data:
            for step in range(random.choice(steps_per_object)):
                sensation = random.choice(objects[object_id])
                enc.encode(sensation)
                sp.compute(learn=True)
                inference = sdrc.infer(sp.columns.flat_index, None)[0]
                inference = np.argmax(inference)
                if inference == object_id:
                    score += 1
                max_score += 1
    if verbose:
        print('Classification Accuracy: %g %%' % (100 * score / max_score))

    if synapses_debug:
        sp.synapses.check_data_integrity()
        print("Synapse data structure integrity is OK.")

    return stability_metric + 10 * (score / max_score)
Example #28
0
    def __init__(self,
        input_sdr,
        mini_columns,     # Integer,
        sparsity,
        potential_pool,
        permanence_inc,
        permanence_dec,
        permanence_thresh,
        segments            = 1,
        macro_columns       = (1,),
        init_dist           = (0, 0),
        boosting_alpha      = None,
        active_thresh       = 0,
        radii               = tuple()):
        """
        Argument mini_columns is an Integer, the number of mini-columns in each 
            macro-column.

        Argument macro_columns is a tuple of integers.  Dimensions of macro
            column array.  These are topological dimensions.  Macro columns are
            distributed across the input space in a uniform grid.

        Optional Argument radii defines the input topology.  Trailing extra
            input dimensions are treated as non-topological dimensions.

        Argument segments is an Integer, the number of proximal segments for each
            mini-column.

        Argument sparsity ...

        Argument potential_pool ...

        Optional Argument boosting_alpha is the small constant used by the
            exponential moving average which tracks each mini-column's
            activation frequency.  Default value is None, which disables
            boosting altogether.

        Argument permanence_inc ...
        Argument permanence_dec ...
        Argument permanence_thresh ...
        Argument init_dist is the (mean, std) of the initial permanence values,
                 which are drawn from a Gaussian distribution.

        Argument active_thresh ...
        """
        assert(isinstance(input_sdr, SDR))
        assert(potential_pool > 1) # Number of synapses, not percent.
        self.mini_columns     = int(round(mini_columns))
        self.macro_columns    = tuple(int(round(dim)) for dim in macro_columns)
        self.radii            = radii
        self.segments         = int(round(segments))
        self.columns          = SDR(self.macro_columns + (self.mini_columns,),
            activation_frequency_alpha = boosting_alpha,
            average_overlap_alpha      = boosting_alpha,)
        self.sparsity         = sparsity
        self.active_thresh    = active_thresh
        self.potential_pool   = potential_pool
        self.age              = 0

        segment_shape = self.macro_columns + (self.mini_columns, self.segments)
        self.synapses = SynapseManager(
            input_sdr              = input_sdr,
            output_sdr             = SDR(segment_shape),
            radii                  = radii,
            init_dist              = init_dist,
            permanence_inc         = permanence_inc,
            permanence_dec         = permanence_dec,
            permanence_thresh      = permanence_thresh,
            initial_potential_pool = self.potential_pool,)

        if init_dist == (0, 0):
            # Nupic's SP init method
            # TODO: Make this a permanent part of the synapses class?  
            # Change init_dist argument to accept a string 'sp' ?
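            # Give each potential synapse a random permanence: roughly half start
            # above the connected threshold and half below it.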
            for idx in range(self.synapses.output_sdr.size):
                pp = self.synapses.postsynaptic_permanences[idx].shape[0]
                connected   = np.random.random(pp) > .5
                permanences = np.random.random(pp)
                permanences[connected] *= 1 - self.synapses.permanence_thresh
                permanences[connected] += self.synapses.permanence_thresh
                permanences[np.logical_not(connected)] *= self.synapses.permanence_thresh
                self.synapses.postsynaptic_permanences[idx] = np.array(permanences, dtype=np.float32)
            self.synapses.rebuild_indexes()

        # Break ties randomly, in a constant unchanging manner.
        self.tie_breakers = np.random.uniform(0, .5, size=self.synapses.output_sdr.dimensions)

        self.boosting_alpha = boosting_alpha
        if boosting_alpha is not None:
            # Make a dedicated SDR to track segment activation frequencies for
            # boosting.
            self.boosting = SDR(self.synapses.output_sdr,
                                activation_frequency_alpha = boosting_alpha,
                                average_overlap_alpha      = boosting_alpha,)
            # Initialize to the target activation frequency/sparsity.
            self.boosting.activation_frequency.fill(self.sparsity / self.segments)

        self.reset()
Example #29
0
    def evaluate(self):
        cell_death_experiment = None
        reward_adjustment_experiment = False
        adversarial_dataset_experiment = False

        if adversarial_dataset_experiment:
            datastream = AdversarialDataset()
        else:
            datastream = Dataset(num_sequences=self.num_seq)

        # SETUP AI.
        start_time = time.time()
        enc = htm.EnumEncoder(self.enc_bits, self.enc_spar, diag=False)
        sp = htm.SpatialPooler(
            self.sp,
            input_sdr=enc.output_sdr,
            column_sdr=SDR((self.cols, )),
        )
        sp_sdrc = htm.SDR_Classifier(htm.SDRC_Parameters(alpha=0.001),
                                     sp.columns, datastream.class_shape,
                                     'index')
        tm = htm.TemporalMemory(self.tm, sp.columns)
        tm_sdrc = htm.SDR_Classifier(htm.SDRC_Parameters(alpha=0.001),
                                     tm.active, datastream.class_shape,
                                     'index')
        bg = basal_ganglia.BasalGanglia(self.bg,
                                        tm.active,
                                        future_discount=.95)
        d1_sdrc = htm.SDR_Classifier(htm.SDRC_Parameters(alpha=0.001),
                                     bg.d1.active, datastream.class_shape,
                                     'index')
        d2_sdrc = htm.SDR_Classifier(htm.SDRC_Parameters(alpha=0.001),
                                     bg.d2.active, datastream.class_shape,
                                     'index')
        memory_score = genetics.memory_fitness(2e9, 3e9)

        # SETUP ACCUMULATORS.
        anom_rand = 0
        anom_rand_total = 0
        anom_pred = 0
        anom_pred_total = 0
        sp_seq_score = 0
        tm_seq_score = 0
        d1_seq_score = 0
        d2_seq_score = 0
        sequence_total = 0
        td_error = 0
        td_error_total = 0  # RMS of TD-Error
        baseline = 0  # RMS(reward). If EV === 0 then this is also the RMS TD-Error.
        event_step = None  # A vertical line is drawn on the TD-Error graph at this step.
        if self.debug:
            input_history = []
            reward_history = []
            anomalous_input_history = []
            ev_history = []
            td_error_history = []
            anomaly_history = []

        def plot_striatum_performance_vs_reward():
            # Measure classification accuracy of each sequence.
            d1_seq_scores = []
            d2_seq_scores = []
            for seq_idx, seq in enumerate(datastream.sequences):
                reward = datastream.rewards[seq_idx]
                d1_seq_scores.append(0)
                d2_seq_scores.append(0)
                seqence_classification_samples = 0
                for measurement in range(3):
                    # Add random inputs at the start of the sequence.
                    reset_steps = random.randrange(
                        min(datastream.filler_length),
                        max(datastream.filler_length) + 1)
                    reset_noise = [
                        random.choice(datastream.inputs)
                        for step in range(reset_steps)
                    ]
                    # Prepend the noise so the filter below skips it, and keep the
                    # original sequence unchanged between measurements.
                    padded_seq = reset_noise + list(seq)
                    for step, inp in enumerate(padded_seq):
                        enc.encode(inp)
                        sp.compute()
                        tm.compute()
                        bg.compute(tm.active, reward=None)  # No learning.
                        # Filter out the random noise at the start of the sequence.
                        if step not in range(reset_steps):
                            d1_seq_cls = d1_sdrc.predict(bg.d1.active)
                            d2_seq_cls = d2_sdrc.predict(bg.d2.active)
                            d1_seq_scores[seq_idx] += d1_seq_cls[
                                seq_idx] / np.sum(d1_seq_cls)
                            d2_seq_scores[seq_idx] += d2_seq_cls[
                                seq_idx] / np.sum(d2_seq_cls)
                            seqence_classification_samples += 1
                d1_seq_scores[seq_idx] /= seqence_classification_samples
                d2_seq_scores[seq_idx] /= seqence_classification_samples
            # Plot the relationship between sequence value and which striatum
            # populations learned to recognise the sequence.
            plt.figure('Reward versus Striatum')
            plt.subplot(1, 2, 1)
            plt.title('D1')
            plt.plot(datastream.rewards, d1_seq_scores, 'ro')
            plt.xlabel('Sequence Reward')
            plt.ylabel('Classification Accuracy')

            plt.subplot(1, 2, 2)
            plt.title('D2')
            plt.plot(datastream.rewards, d2_seq_scores, 'bo')
            plt.xlabel('Sequence Reward')
            plt.ylabel('Classification Accuracy')

        # RUN ONLINE.
        print("Num Cycles", self.cycles)
        for step in range(self.cycles):
            inp, reward = next(datastream)
            enc.encode(inp)
            sp.compute()
            tm.compute()
            bg.compute(tm.active, reward)
            sp.learn()
            tm.learn()

            if cell_death_experiment is not None and step == self.cycles // 2:
                tm.active.kill_cells(cell_death_experiment)
                bg.d1.active.kill_cells(cell_death_experiment)
                bg.d2.active.kill_cells(cell_death_experiment)
                bg.gpe.active.kill_cells(cell_death_experiment)
                bg.gpi.active.kill_cells(cell_death_experiment)
                print("KILLED %g %% OF CELLS" % (cell_death_experiment * 100))
                event_step = step

            # Measure performance.
            if bg.td_error is not None:
                baseline += reward**2
                td_error += bg.td_error**2
                td_error_total += 1

            if datastream.anomallous:
                anom_rand += tm.anomaly
                anom_rand_total += 1
            else:
                anom_pred += tm.anomaly
                anom_pred_total += 1

            # Train and test sequence classifiers for every part of the system.
            if datastream.state[0] == 'sequence':
                sp_seq_cls = sp_sdrc.predict(sp.columns)
                tm_seq_cls = tm_sdrc.predict(tm.active)
                d1_seq_cls = d1_sdrc.predict(bg.d1.active)
                d2_seq_cls = d2_sdrc.predict(bg.d2.active)
                sequence_total += 1
                seq_idx = datastream.state[1]
                # SDR Classifier outputs a PDF.  At creation, PDF may be beneath
                # the minimum representable floating point value.
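                # Accumulate the predicted probability assigned to the correct sequence.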
                sp_seq_score += np.nan_to_num(sp_seq_cls[seq_idx] /
                                              np.sum(sp_seq_cls))
                tm_seq_score += np.nan_to_num(tm_seq_cls[seq_idx] /
                                              np.sum(tm_seq_cls))
                d1_seq_score += np.nan_to_num(d1_seq_cls[seq_idx] /
                                              np.sum(d1_seq_cls))
                d2_seq_score += np.nan_to_num(d2_seq_cls[seq_idx] /
                                              np.sum(d2_seq_cls))
                sp_sdrc.train(sp.columns, (seq_idx, ))
                tm_sdrc.train(tm.active, (seq_idx, ))
                d1_sdrc.train(bg.d1.active, (seq_idx, ))
                d2_sdrc.train(bg.d2.active, (seq_idx, ))

            if self.debug:
                # Record everything for debugging.
                input_history.append(inp)
                reward_history.append(reward)
                anomalous_input_history.append(datastream.anomallous)
                ev_history.append(bg.expected_value)
                td_error_history.append(
                    bg.td_error if bg.td_error is not None else 0)
                anomaly_history.append(tm.anomaly)

                if reward_adjustment_experiment and step == self.cycles // 2:
                    plot_striatum_performance_vs_reward()
                    plt.show()
                    print('ADJUSTING ALL REWARDS!')
                    datastream.adjust_rewards()
                    event_step = step

        # REPORT.
        sp_seq_score = sp_seq_score / sequence_total
        tm_seq_score = tm_seq_score / sequence_total
        d1_seq_score = d1_seq_score / sequence_total
        d2_seq_score = d2_seq_score / sequence_total
        anom_pred = anom_pred / anom_pred_total
        anom_rand = anom_rand / anom_rand_total
        baseline = (baseline / td_error_total)**.5
        td_error = (td_error / td_error_total)**.5
        fitness = {
            'anom_pred': anom_pred,
            'anom_rand': anom_rand,
            'seq_sp': sp_seq_score,
            'seq_tm': tm_seq_score,
            'seq_d1': d1_seq_score,
            'seq_d2': d2_seq_score,
            'td_error': td_error / baseline,
            'time': (time.time() - start_time) / 60,
            'memory': memory_score,
        }
        if self.debug:
            print(sp.statistics())
            print(tm.statistics())
            print(bg.statistics())
            print("TD-Error Baseline", baseline, "Measured", td_error)
            print(fitness)

            plot_striatum_performance_vs_reward()

            # Plot the Reward, Expected Value, and TD-Error.
            steps = np.arange(len(input_history))
            plt.figure('Reinforcement Learning Graph')
            plt.title(
                'Reward is Red, Expected Value is Green, TD-Error is Blue.')
            plt.xlabel('Step Number')
            plt.plot(steps, reward_history, 'r', steps, ev_history, 'g', steps,
                     td_error_history, 'b')

            # Plot the Anomaly.
            plt.figure('Anomaly Graph')
            plt.title('Anomaly is Green, Unpredictable input is Red.')
            plt.plot(steps, anomaly_history, 'g', steps,
                     anomalous_input_history, 'r')
            plt.xlabel('Step Number')

            # Smooth and plot the TD error alone.
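            # Exponential moving average: avg <- (1 - alpha) * avg + alpha * |td_err|.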
            alpha = .005
            td_err_cleaned = []
            avg = 0
            for td_err in td_error_history:
                avg = avg * (1 - alpha) + abs(td_err) * alpha
                td_err_cleaned.append(avg)
            plt.figure('TD Error')
            plt.title('Exponential Rolling average of |TD Error|, alpha = %g' %
                      alpha)
            plt.plot(steps, td_err_cleaned, 'b')
            plt.xlabel('Step Number')
            if event_step is not None:
                plt.axvline(event_step)

            plt.show()
        return fitness
Example #30
0
    def __init__(self, parameters, input_sdr, column_sdr,
        radii=None,
        stability_sample_size=0,
        multisegment_experiment=None,
        init_dist=None,):
        """
        Argument parameters is an instance of SpatialPoolerParameters.

        Argument input_sdr ...
        Argument column_sdr ...

        Argument radii is the standard deviation of the gaussian window which
                 defines the local neighborhood of a column.  The radii
                 determine which inputs are likely to be in a columns potential
                 pool.  If radii is None then topology is disabled.  See
                 SynapseManager.normally_distributed_connections for details
                 about topology.

        Argument stability_sample_size, set to 0 to disable stability
                 monitoring; the default is 0 (disabled).
        """
        assert(isinstance(parameters, SpatialPoolerParameters))
        assert(isinstance(input_sdr, SDR))
        assert(isinstance(column_sdr, SDR))
        self.args = args           = parameters
        self.inputs                = input_sdr
        self.columns               = column_sdr
        self.topology              = radii is not None
        self.age                   = 0
        self.stability_schedule    = [0] if stability_sample_size > 0 else [-1]
        self.stability_sample_size = stability_sample_size
        self.stability_samples     = []

        self.multisegment = multisegment_experiment is not None
        if self.multisegment:
            # EXPERIMENTAL: Multi-segment proximal dendrites.
            self.segments_per_cell = int(round(multisegment_experiment))
            self.proximal = SynapseManager( self.inputs,
                                            SDR(self.columns.dimensions + (self.segments_per_cell,),
                                                activation_frequency_alpha=args.boosting_alpha),    # Used for boosting!
                                            permanence_inc    = args.permanence_inc,
                                            permanence_dec    = args.permanence_dec,
                                            permanence_thresh = args.permanence_thresh,)
            # Initialize to the target activation frequency/sparsity.
            self.proximal.outputs.activation_frequency.fill(args.sparsity / self.segments_per_cell)
        else:
            self.proximal = SynapseManager( self.inputs,
                                            self.columns,
                                            permanence_inc    = args.permanence_inc,
                                            permanence_dec    = args.permanence_dec,
                                            permanence_thresh = args.permanence_thresh,)
        if self.topology:
            r = self.proximal.normally_distributed_connections(args.potential_pool, radii, init_dist=init_dist)
            self.inhibition_radii = r
        else:
            self.proximal.uniformly_distributed_connections(args.potential_pool, init_dist=init_dist)

        if args.boosting_alpha is not None:
            # Make a dedicated SDR to track column activation frequencies for
            # boosting.
            self.boosting = SDR(self.columns,
                                activation_frequency_alpha = args.boosting_alpha,
                                # Note: average overlap is useful to know, but is not part of the boosting algorithm.
                                average_overlap_alpha      = args.boosting_alpha,)
            # Initialize to the target activation frequency/sparsity.
            self.boosting.activation_frequency.fill(args.sparsity)