Example #1
    def __init__(
            self,
            configs=(),  # block configuration
            num_l=2,  # number of labels
            min_val=0.0,  # minimum input value
            max_val=1.0,  # maximum input value
            num_i=1024,  # number of input statelets
            num_ai=128,  # number of active input statelets
            num_s=512,  # number of statelets
            num_as=8,  # number of active statelets
            pct_pool=0.8,  # pooling percentage
            pct_conn=0.5,  # initially connected percentage
            pct_learn=0.3):  # learn percentage

        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # seed the random number generator
        # bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, num_i, num_ai)

        self.pc = PatternClassifier(num_l, num_s, num_as, PERM_THR, PERM_INC,
                                    PERM_DEC, pct_pool, pct_conn, pct_learn)

        self.pc.input.add_child(self.st.output, 0)

        self.pc.init()
Example #2
    def __init__(
        self,
        configs=(),  # block configuration
        labels=(0, 1),  # labels
        min_val=-1.0,  # ScalarEncoder minimum input value
        max_val=1.0,  # ScalarEncoder maximum input value
        num_i=1024,  # ScalarEncoder number of statelets
        num_ai=128,  # ScalarEncoder number of active statelets
        num_s=32,  # PatternClassifier number of statelets
        num_as=8,  # PatternClassifier number of active statelets
        pct_pool=0.8,  # PatternClassifier pool percentage
        pct_conn=0.5,  # PatternClassifier initial connection percentage
        pct_learn=0.25):  # PatternClassifier learn percentage

        # seed the random number generator
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pc = blocks["pattern_classifier"]

        if len(self.encoders) == 0:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))

        if self.pc is None:
            num_l = len(labels)
            self.pc = PatternClassifier(labels, num_s, num_as, 20, 2, 1,
                                        pct_pool, pct_conn, pct_learn)

        for encoder in self.encoders:
            self.pc.input.add_child(encoder.output)
Example #3
class Classifier:
    def __init__(
            self,
            configs=(),  # block configuration
            num_l=2,  # number of labels
            min_val=0.0,  # minimum input value
            max_val=1.0,  # maximum input value
            num_i=1024,  # number of input statelets
            num_ai=128,  # number of active input statelets
            num_s=512,  # number of statelets
            num_as=8,  # number of active statelets
            pct_pool=0.8,  # pooling percentage
            pct_conn=0.5,  # initially connected percentage
            pct_learn=0.3):  # learn percentage

        PERM_THR = 20
        PERM_INC = 2
        PERM_DEC = 1

        # seed the random number generator
        # bb.seed(0) # TODO: fix seeding

        self.st = ScalarTransformer(min_val, max_val, num_i, num_ai)

        self.pc = PatternClassifier(num_l, num_s, num_as, PERM_THR, PERM_INC,
                                    PERM_DEC, pct_pool, pct_conn, pct_learn)

        self.pc.input.add_child(self.st.output, 0)

        self.pc.init()

    #def save(self, path='./', name='classifier'):
    #    self.pc.save(path + name + "_pc.bin")

    #def load(self, path='./', name='classifier'):
    #    self.pc.load(path + name + "_pc.bin")

    def fit(self, value=0.0, label=0):

        self.st.set_value(value)
        self.pc.set_label(label)
        self.st.feedforward()
        self.pc.feedforward(learn=True)

        return self.pc.get_probabilities()

    def predict(self, value=0.0):

        self.st.set_value(value)
        self.st.feedforward()
        self.pc.feedforward(learn=False)

        return self.pc.get_probabilities()
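
A minimal usage sketch for the Classifier above; the scalar/label pairs are illustrative, not from the original source:

clf = Classifier(num_l=2, min_val=0.0, max_val=1.0)

# train on a few scalar/label pairs
for value, label in [(0.1, 0), (0.2, 0), (0.8, 1), (0.9, 1)]:
    clf.fit(value, label)

# query a held-out value; returns one probability per label
probs = clf.predict(0.85)
print(probs)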
Example #4
    def fit(self, X, y):
        """Fit the model using X as training data and y as target values

        Parameters
        ----------
        X : {array-like, sparse matrix}
            Training data of shape [num_samples, num_features].

        y : {array-like, sparse matrix}
            Target values of shape [num_samples] or [num_samples, num_outputs].

        Returns
        -------
        self : object
            The fitted classifier.
        """

        X, y = check_X_y(X, y, multi_output=True)
        #X, y = check_X_y(X, y)

        if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
            if y.ndim != 1:
                warnings.warn(
                    "A column-vector y was passed when a 1d array "
                    "was expected. Please change the shape of y to "
                    "(num_samples, ), for example using ravel().",
                    DataConversionWarning,
                    stacklevel=2)

            self.outputs_2d_ = False
            y = y.reshape((-1, 1))
        else:
            self.outputs_2d_ = True

        check_classification_targets(y)
        self.classes_ = []
        self.class_indices_ = []
        self._y = np.empty(y.shape, dtype=int)

        for k in range(self._y.shape[1]):
            classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes)

        #class_names, self.class_indices_ = np.unique(self.classes_, return_inverse=True)

        #self.labels = np.array([str(val) for val in self.class_indices_])

        if not self.outputs_2d_:
            self.classes_ = self.classes_[0]
            self._y = self._y.ravel()

        # class for undefined and unseen values
        if self.use_undefined_class:
            undef_class = np.max(self.classes_) + 1
            self.classes_ = np.append(self.classes_, undef_class)
            #print("with undefined class:", self.classes_)

        # update labels in DPC config
        self.dpc_config['num_l'] = len(self.classes_)

        #print("classes_:", self.classes_)
        #print("self._y:", self._y)

        # instantiate the HyperGrid Transform
        self.gridEncoder = HyperGridTransform(**self.hgt_config)

        # fit the HyperGrid transform to the data
        X_new = self.gridEncoder.fit_transform(X)

        #print("HyperGrid Parameters")
        #print(self.gridEncoder.subspace_periods)
        #print(self.gridEncoder.subspace_vectors)

        # get the number of bits being used for transformed output
        self.num_bits = self.gridEncoder.num_bits
        self.num_act_bits = self.gridEncoder.num_act_bits

        #print("BlankBlock:")
        #print("num_bits:", self.num_bits)
        #print("num_act_bits:", self.num_act_bits)

        # Blank Block to hold the hypergrid output
        self.blankBlock = BlankBlock(num_s=self.num_bits)

        # Create PatternClassifier block
        self.dpc = PatternClassifier(**self.dpc_config)

        #print("PatternClassifier:")
        #self.dpc.print_parameters()

        # connect blocks together
        self.dpc.input.add_child(self.blankBlock.output, 0)

        # Train Network
        probs = self._fit(X_new, self._y)
        #print("data:", X_new)
        #print("labels:", self._y)
        #print("training:", probs)

        return self
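
The `_fit` helper called above is not included in this snippet. A single-pass sketch consistent with the block calls used elsewhere on this page (the multi-epoch version appears in Example #5 below):

    def _fit(self, X, y):
        probabilities = []
        for k in range(y.shape[0]):
            # drive the transformed bits through the blocks and learn the label
            self.blankBlock.output.bits = X[k, :]
            self.blankBlock.feedforward()
            self.dpc.set_label(y[k])
            self.dpc.feedforward(learn=True)
            probabilities.append(self.dpc.get_probabilities())
        return np.asarray(probabilities)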
Example #5
class BBClassifier:
    def __init__(
            self,
            # Training Arguments
            num_epochs=3,
            use_undefined_class=False,

            # Distributed Pattern Classifier Arguments
            num_l=2,  # number of labels
            num_s=512,  # number of statelets
            num_as=8,  # number of active statelets
            pct_pool=0.8,  # percent pooled
            pct_conn=0.8,  # percent initially connected
            pct_learn=0.3,  # percent learn
            seed=0,

            # HyperGrid Transform Arguments
            num_bins=4,
            num_acts=1,
            num_grids=64,
            num_subspace_dims=1,
            origin=None,
            num_input_dims=None,
            max_period=2.0,
            min_period=0.05,
            use_orthogonal_bases=False,
            use_normal_dist_bases=False,
            use_standard_bases=False,
            set_bases=None,
            set_periods=None,
            use_random_uniform_periods=False,
            use_evenly_spaced_periods=False,
            random_state=None):
        """Classify N-dimensional inputs using Hypergrid Transform and Distributed Pattern Classifier

        Parameters
        ----------
        num_epochs : integer
            Number of training epochs.

        use_undefined_class : boolean
            Whether to reserve a class for test samples that have no training data.

        num_l : integer
            Number of labels for the distributed pattern classifier.

        num_s : integer
            Number of class detectors to allocate for the distributed pattern classifier.

        num_as : integer
            Number of active class detectors per time step for the distributed pattern classifier.

        pct_pool : float, between 0.0 and 1.0
            Percentage of random bits an individual detector has potential access to.

        pct_conn : float, between 0.0 and pct_pool
            Percentage of random bits an individual detector is currently connected to.

        pct_learn : float, between 0.0 and 1.0
            Percentage of bits to update when training occurs.

        seed : integer
            Seed for the distributed pattern classifier's random number generator.

        num_bins : integer
            Number of bins to create for each grid.

        num_acts : integer
            Number of contiguous bins to activate along each subspace dimension for each grid.

        num_grids : integer
            Number of grids to generate.

        num_subspace_dims : integer
            Dimensionality of subspaces to map input to.

        origin : array-like, shape (num_features,)
            Point of origin in input space for embedding a sample into the grids.

        num_input_dims : integer
            Number of input dimensions for the HyperGrid Transform.

        max_period : float
            Maximum bound on grid period.

        min_period : float
            Minimum bound on grid period.

        use_orthogonal_bases : boolean
            Generate random orthogonal basis vectors for each subspace.

        use_normal_dist_bases : boolean
            Generate a normal distribution of basis vectors, points sampled on a sphere.

        use_standard_bases : boolean
            Use randomly selected standard basis vectors for each grid.

        set_bases : array-like, shape (num_grids, num_subspace_dims, num_features)
            Use manually specified subspace basis vectors for each grid.

        set_periods : array-like, shape (num_grids, num_subspace_dims)
            Use manually specified periods for each grid and its subspaces.

        use_random_uniform_periods : boolean
            Use random periods for subspace grids.

        use_evenly_spaced_periods : boolean
            Use evenly spaced periods for subspace grids over the interval min_period to max_period.

        random_state : integer
            Seed for random number generators.
        """

        self.num_epochs = num_epochs
        self.use_undefined_class = use_undefined_class
        self._y = []
        self.classes_ = np.array([])
        self.outputs_2d_ = False

        self.dpc_config = dict(num_l=num_l,
                               num_s=num_s,
                               num_as=num_as,
                               perm_thr=20,
                               perm_inc=2,
                               perm_dec=1,
                               pct_pool=pct_pool,
                               pct_conn=pct_conn,
                               pct_learn=pct_learn,
                               num_t=2,
                               seed=seed)

        self.hgt_config = dict(
            num_grids=num_grids,
            num_bins=num_bins,
            num_acts=num_acts,
            num_subspace_dims=num_subspace_dims,
            origin=origin,
            num_input_dims=num_input_dims,
            max_period=max_period,
            min_period=min_period,
            use_normal_dist_bases=use_normal_dist_bases,
            use_standard_bases=use_standard_bases,
            use_orthogonal_bases=use_orthogonal_bases,
            use_evenly_spaced_periods=use_evenly_spaced_periods,
            use_random_uniform_periods=use_random_uniform_periods,
            set_bases=set_bases,
            set_periods=set_periods,
            random_state=random_state,
            flatten_output=True)

    def __del__(self):
        pass

    def _generate_config(self, num_labels=2):
        # connect BlankBlock to DPC
        # optionally add PatternPooler
        pass

    def reset(self):
        pass

    def fit(self, X, y):
        """Fit the model using X as training data and y as target values

        Parameters
        ----------
        X : {array-like, sparse matrix}
            Training data of shape [num_samples, num_features].

        y : {array-like, sparse matrix}
            Target values of shape [num_samples] or [num_samples, num_outputs].

        Returns
        -------
        self : object
            The fitted classifier.
        """

        X, y = check_X_y(X, y, multi_output=True)
        #X, y = check_X_y(X, y)

        if y.ndim == 1 or (y.ndim == 2 and y.shape[1] == 1):
            if y.ndim != 1:
                warnings.warn(
                    "A column-vector y was passed when a 1d array "
                    "was expected. Please change the shape of y to "
                    "(num_samples, ), for example using ravel().",
                    DataConversionWarning,
                    stacklevel=2)

            self.outputs_2d_ = False
            y = y.reshape((-1, 1))
        else:
            self.outputs_2d_ = True

        check_classification_targets(y)
        self.classes_ = []
        self.class_indices_ = []
        self._y = np.empty(y.shape, dtype=int)

        for k in range(self._y.shape[1]):
            classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes)

        #class_names, self.class_indices_ = np.unique(self.classes_, return_inverse=True)

        #self.labels = np.array([str(val) for val in self.class_indices_])

        if not self.outputs_2d_:
            self.classes_ = self.classes_[0]
            self._y = self._y.ravel()

        # class for undefined and unseen values
        if self.use_undefined_class:
            undef_class = np.max(self.classes_) + 1
            self.classes_ = np.append(self.classes_, undef_class)
            #print("with undefined class:", self.classes_)

        # update labels in DPC config
        self.dpc_config['num_l'] = len(self.classes_)

        #print("classes_:", self.classes_)
        #print("self._y:", self._y)

        # instantiate the HyperGrid Transform
        self.gridEncoder = HyperGridTransform(**self.hgt_config)

        # fit the HyperGrid transform to the data
        X_new = self.gridEncoder.fit_transform(X)

        #print("HyperGrid Parameters")
        #print(self.gridEncoder.subspace_periods)
        #print(self.gridEncoder.subspace_vectors)

        # get the number of bits being used for transformed output
        self.num_bits = self.gridEncoder.num_bits
        self.num_act_bits = self.gridEncoder.num_act_bits

        #print("BlankBlock:")
        #print("num_bits:", self.num_bits)
        #print("num_act_bits:", self.num_act_bits)

        # Blank Block to hold the hypergrid output
        self.blankBlock = BlankBlock(num_s=self.num_bits)

        # Create PatternClassifier block
        self.dpc = PatternClassifier(**self.dpc_config)

        #print("PatternClassifier:")
        #self.dpc.print_parameters()

        # connect blocks together
        self.dpc.input.add_child(self.blankBlock.output, 0)

        # Train Network
        probs = self._fit(X_new, self._y)
        #print("data:", X_new)
        #print("labels:", self._y)
        #print("training:", probs)

        return self

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array-like, shape (num_query, num_features)
            Test samples.

        Returns
        -------
        y : array of shape [num_samples] or [num_samples, num_outputs]
            Class labels for each data sample.
        """
        X = check_array(X)

        num_samples, num_features = X.shape

        if self._y.ndim == 1 or (self._y.ndim == 2 and self._y.shape[1] == 1):
            num_outputs = 1
        else:
            num_outputs = self._y.shape[1]

        # transform data
        X_new = self.gridEncoder.transform(X)

        probabilities = self._predict(X_new)

        classes_ = self.classes_
        if not self.outputs_2d_:
            classes_ = [self.classes_]

        #classes_ = self.labels
        #if not self.outputs_2d_:
        #    classes_ = [self.labels]

        y_pred = np.empty((num_samples, num_outputs), dtype=classes_[0].dtype)

        for k in range(num_samples):
            py = probabilities[k, :]
            y_best = np.argmax(py)
            # map the winning index back to the original class label
            y_pred[k, :] = classes_[0][y_best]

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        # print("y_pred:", type(y_pred), y_pred.shape)
        # print("y_pred:", y_pred)

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : array-like, shape (num_query, num_features)
            Test samples.

        Returns
        -------
        p : array of shape = [num_samples, num_classes], or a list of num_outputs
            of such arrays if num_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        X = check_array(X)

        num_samples, num_features = X.shape

        if self._y.ndim == 1 or (self._y.ndim == 2 and self._y.shape[1] == 1):
            num_outputs = 1
        else:
            num_outputs = self._y.shape[1]

        # transform data
        X_new = self.gridEncoder.transform(X)

        return self._predict(X_new)

    def score(self, X, y, sample_weight=None):
        """Returns the mean accuracy on the given test data and labels.

        In multi-label classification, this is the subset accuracy
        which is a harsh metric since you require for each sample that
        each label set be correctly predicted.

        Parameters
        ----------
        X : array-like, shape = (num_samples, num_features)
            Test samples.

        y : array-like, shape = (num_samples) or (num_samples, num_outputs)
            True labels for X.

        sample_weight : array-like, shape = [num_samples], optional
            Sample weights.

        Returns
        -------
        score : float
            Mean accuracy of self.predict(X) wrt. y.

        """
        from sklearn.metrics import accuracy_score

        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)

    def _fit(self, X, y):

        probabilities = []
        # train pattern classifier
        for i in range(self.num_epochs):
            epoch_probs = []

            # Train Network
            #t0 = time.time()
            for k in range(y.shape[0]):
                x_bits = X[k, :]  # avoid shadowing the builtin input()
                target = y[k]

                self.blankBlock.output.bits = x_bits
                self.blankBlock.feedforward()
                self.dpc.set_label(target)
                self.dpc.feedforward(learn=True)

                curr_prob = self.dpc.get_probabilities()
                epoch_probs.append(curr_prob)

            #t1 = time.time()
            #print("train epoch time = %fs with size %d" % ((t1 - t0), y.shape[0]))
            probabilities.append(epoch_probs)

        return np.asarray(probabilities)

    def _predict(self, X):

        probabilities = []

        # num_points = 1000
        num_points = X.shape[0]

        #t0 = time.time()
        for k in range(X.shape[0]):
            x_bits = X[k, :]  # avoid shadowing the builtin input()

            self.blankBlock.output.bits = x_bits
            self.blankBlock.feedforward()
            self.dpc.feedforward(learn=False)

            curr_prob = self.dpc.get_probabilities()
            probabilities.append(curr_prob)

        #t1 = time.time()
        #print("%d points, time = %fs" % (num_points, (t1 - t0)))

        return np.asarray(probabilities)
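
A hedged usage sketch for BBClassifier, assuming the class and its BrainBlocks dependencies are importable; the data is synthetic and purely illustrative:

import numpy as np

X = np.random.rand(200, 2)
y = (X[:, 0] > 0.5).astype(int)  # two synthetic classes

clf = BBClassifier(num_epochs=3, num_grids=64, num_bins=4)
clf.fit(X, y)
print(clf.score(X, y))     # mean accuracy on the training data
print(clf.predict(X[:5]))  # predicted class labels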
Example #6
hog_fd = hog(x_train[0],
             orientations=orientations,
             pixels_per_cell=pixels_per_cell,
             cells_per_block=cells_per_block,
             visualize=False,
             multichannel=False,
             feature_vector=True)

# setup BrainBlocks architecture
blankblock = BlankBlock(num_s=len(hog_fd))
classifier = PatternClassifier(
    labels=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
    num_s=num_s,
    num_as=9,
    perm_thr=20,
    perm_inc=2,
    perm_dec=1,
    pct_pool=0.8,
    pct_conn=1.0,
    pct_learn=0.25)

classifier.input.add_child(blankblock.output)

# train BrainBlocks classifier
bb_train_time = 0
print("Training...", flush=True)

for _ in range(num_epochs):
    for i in range(num_trains):
        hog_fd = hog(x_train[i],
                     orientations=orientations,
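                     pixels_per_cell=pixels_per_cell,
                     cells_per_block=cells_per_block,
                     visualize=False,
                     multichannel=False,
                     feature_vector=True)

The snippet is truncated inside the training loop; the call above is completed from the identical hog() call at the top of this example. By analogy with the other MNIST-style examples on this page, each iteration presumably finishes roughly as below; thresholding the real-valued HOG vector into bits is an assumption, since BlankBlock statelets are binary:

        # assumption: binarize the HOG descriptor before handing it to the block
        blankblock.output.bits = (hog_fd > 0.5).astype(int)
        classifier.compute(y_train[i], learn=True)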
Example #7
        num_as=8)  # number of active statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=10,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    pc = PatternClassifier(
        labels=labels,  # user-defined labels
        num_s=640,  # number of statelets
        num_as=8,  # number of active statelets
        perm_thr=20,  # receptor permanence threshold
        perm_inc=2,  # receptor permanence increment
        perm_dec=1,  # receptor permanence decrement
        pct_pool=0.8,  # pooling percentage
        pct_conn=0.8,  # initially connected percentage
        pct_learn=0.25)  # learn percentage 0.25

    sl.input.add_child(e.output)
    pc.input.add_child(sl.output)

    aed = AbnormalEventDetector(5, 5)

    print('val  scr  lbl  prob  ae  output_active_statelets')

    for i in range(len(values)):
        e.compute(value=values[i])
        sl.compute(learn=True)
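
This loop is also cut off. A guess at how it continues, using only calls shown elsewhere on this page; step_labels is a hypothetical per-step label source, and the AbnormalEventDetector interface does not appear in this snippet:

        pc.compute(label=step_labels[i], learn=True)  # step_labels: assumed name
        probs = pc.get_probabilities()
        # the AbnormalEventDetector (aed) would consume the step results here,
        # but its API is not shown above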
Example #8
num_trains = len(x_train)
num_tests = len(x_test)
pixel_thresh = 128  # from 0 to 255

# setup BrainBlocks classifier architecture
input_block = BlankBlock(num_s=784)

labels = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9)

num_statelets = 1000

classifier = PatternClassifier(labels=labels,
                               num_s=num_statelets,
                               num_as=10,
                               perm_thr=20,
                               perm_inc=2,
                               perm_dec=1,
                               pct_pool=0.1,
                               pct_conn=1.0,
                               pct_learn=0.25)

classifier.input.add_child(input_block.output)

# train BrainBlocks classifier
print("Training...", flush=True)
t0 = time.time()
for i in range(num_trains):
    bitimage = binarize_image(x_train[i], pixel_thresh)
    input_block.output.bits = flatten_image(bitimage)
    classifier.compute(y_train[i], learn=True)
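
A matching evaluation loop, sketched from the same calls; y_test is an assumed counterpart to x_test, numpy is assumed imported as np, and the 0 passed to compute() is a placeholder label as in Example #12:

num_correct = 0
for i in range(num_tests):
    bitimage = binarize_image(x_test[i], pixel_thresh)
    input_block.output.bits = flatten_image(bitimage)
    classifier.compute(0, learn=False)  # placeholder label, no learning
    if np.argmax(classifier.get_probabilities()) == y_test[i]:
        num_correct += 1
print("accuracy:", num_correct / num_tests)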
Example #9
def test_pattern_classifier():
    e = SymbolsEncoder(
        max_symbols=8,  # maximum number of symbols
        num_s=1024)  # number of statelets

    pc = PatternClassifier(
        labels=(0, 1),  # user-defined labels
        num_s=512,  # number of statelets
        num_as=8,  # number of active statelets
        perm_thr=20,  # receptor permanence threshold
        perm_inc=2,  # receptor permanence increment
        perm_dec=1,  # receptor permanence decrement
        pct_pool=0.8,  # pooling percentage
        pct_conn=0.5,  # initially connected percentage
        pct_learn=0.25)  # learn percentage

    pc.input.add_child(e.output)

    for _ in range(10):
        e.compute(value=0)
        pc.compute(label=0, learn=True)
        e.compute(value=1)
        pc.compute(label=1, learn=True)

    e.compute(value=0)
    pc.compute(learn=False)
    actual_probs = np.array(pc.get_probabilities())
    expect_probs = np.array([1.0, 0.0])
    np.testing.assert_array_equal(actual_probs, expect_probs)

    e.compute(value=1)
    pc.compute(learn=False)
    actual_probs = np.array(pc.get_probabilities())
    expect_probs = np.array([0.0, 1.0])
    np.testing.assert_array_equal(actual_probs, expect_probs)
Example #10
run_t0 = time.time()

# Define scenario parameters
num_epochs = 1
num_trains = len(x_train)
num_tests = len(x_test)
pixel_thresh = 128  # from 0 to 255
num_s = 8000

# Setup BrainBlocks architecture
blankblock = BlankBlock(num_s=784)
classifier = PatternClassifier(num_l=10,
                               num_s=num_s,
                               num_as=9,
                               perm_thr=20,
                               perm_inc=2,
                               perm_dec=1,
                               pct_pool=0.8,
                               pct_conn=1.0,
                               pct_learn=0.3)

classifier.input.add_child(blankblock.output, 0)

# Train BrainBlocks classifier
bb_train_time = 0
print("Training...", flush=True)

for _ in range(num_epochs):
    for i in range(num_trains):
        bitimage = binarize(x_train[i], pixel_thresh)
        blankblock.output.bits = flatten(bitimage)
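
The loop is clipped before the learning step; by analogy with the _fit loop in Example #5, it presumably continues:

        blankblock.feedforward()
        classifier.set_label(y_train[i])
        classifier.feedforward(learn=True)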
Example #11
# retrieve the integer classes from above
int_classes = [k for k in range(len(le.classes_))]

# define blocks
se = ScalarEncoder(
    min_val=-1.0,  # minimum input value
    max_val=1.0,  # maximum input value
    num_s=1024,  # number of statelets
    num_as=128)  # number of active statelets

pp = PatternClassifier(
    labels=int_classes,  # user-defined labels
    num_s=512,  # number of statelets
    num_as=8,  # number of active statelets
    perm_thr=20,  # receptor permanence threshold
    perm_inc=2,  # receptor permanence increment
    perm_dec=1,  # receptor permanence decrement
    pct_pool=0.8,  # pooling percentage
    pct_conn=0.5,  # initially connected percentage
    pct_learn=0.25)  # learn percentage

# connect blocks
pp.input.add_child(se.output)

# fit
for i in range(len(x_trains)):
    se.compute(x_trains[i])
    pp.compute(y_trains_ints[i], learn=True)

# predict
probs = []
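
The snippet ends right after probs = []. A sketch of the prediction loop consistent with the predict() method in Example #12; x_tests is an assumed name for the held-out values, and 0 is a placeholder label:

for i in range(len(x_tests)):
    se.compute(x_tests[i])
    pp.compute(0, learn=False)  # placeholder label, no learning
    probs.append(pp.get_probabilities())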
Example #12
class Classifier:
    def __init__(
        self,
        configs=(),  # block configuration
        labels=(0, 1),  # labels
        min_val=-1.0,  # ScalarEncoder minimum input value
        max_val=1.0,  # ScalarEncoder maximum input value
        num_i=1024,  # ScalarEncoder number of statelets
        num_ai=128,  # ScalarEncoder number of active statelets
        num_s=32,  # PatternClassifier number of statelets
        num_as=8,  # PatternClassifier number of active statelets
        pct_pool=0.8,  # PatternClassifier pool percentage
        pct_conn=0.5,  # PatternClassifier initial connection percentage
        pct_learn=0.25):  # PatternClassifier learn percentage

        # seed the random number generator
        bb.seed(0)

        # build blocks from config descriptions if given
        blocks = get_blocks(configs)
        self.encoders = blocks["encoders"]
        self.pc = blocks["pattern_classifier"]

        if len(self.encoders) == 0:
            self.encoders.append(ScalarEncoder(min_val, max_val, num_i,
                                               num_ai))

        if self.pc is None:
            num_l = len(labels)
            self.pc = PatternClassifier(labels, num_s, num_as, 20, 2, 1,
                                        pct_pool, pct_conn, pct_learn)

        for encoder in self.encoders:
            self.pc.input.add_child(encoder.output)

    def print_parameters(self):
        for encoder in self.encoders:
            encoder.print_parameters()
        self.pc.print_parameters()

    def save_memories(self, path='./', name='classifier'):
        self.pc.save_memories(path + name + "_pc.bin")

    def load_memories(self, path='./', name='classifier'):
        self.pc.load_memories(path + name + "_pc.bin")

    def fit(self, inputs=(), labels=()):
        probs = []
        num_steps = 0xFFFFFFFF
        num_measurands = len(inputs)
        num_encoders = len(self.encoders)

        if num_measurands != num_encoders:
            print("Warning: compute() num_measurands != num_encoders")
            return probs

        for seq in inputs:  # avoid shadowing the builtin input()
            if len(seq) < num_steps:
                num_steps = len(seq)

        for s in range(num_steps):
            for e in range(num_encoders):
                self.encoders[e].compute(inputs[e][s])
            self.pc.compute(labels[s], learn=True)
            probs.append(self.pc.get_probabilities())

        return probs

    def predict(self, inputs=()):
        probs = []
        num_steps = 0xFFFFFFFF
        num_measurands = len(inputs)
        num_encoders = len(self.encoders)

        if num_measurands != num_encoders:
            print("Warning: compute() num_measurands != num_encoders")
            return probs

        for seq in inputs:  # avoid shadowing the builtin input()
            if len(seq) < num_steps:
                num_steps = len(seq)

        for s in range(num_steps):
            for e in range(num_encoders):
                self.encoders[e].compute(inputs[e][s])
            self.pc.compute(0, learn=False)
            probs.append(self.pc.get_probabilities())

        return probs
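
A minimal usage sketch for this Classifier, with illustrative values; inputs holds one sequence per encoder (one by default) and labels holds one label per time step:

clf = Classifier(labels=(0, 1))

# train on one measurand stream, then query new values
train_probs = clf.fit(inputs=([0.1, 0.9, 0.2, 0.8],), labels=(0, 1, 0, 1))
test_probs = clf.predict(inputs=([0.15, 0.85],))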
Example #13
def test_pattern_classifier():
    e = SymbolsEncoder(max_symbols=8, num_s=1024)

    pc = PatternClassifier(labels=(0, 1),
                           num_s=512,
                           num_as=8,
                           perm_thr=20,
                           perm_inc=2,
                           perm_dec=1,
                           pct_pool=0.8,
                           pct_conn=0.5,
                           pct_learn=0.25)

    pc.input.add_child(e.output)

    for _ in range(10):
        e.compute(0)
        pc.compute(0, True)
        e.compute(1)
        pc.compute(1, True)

    e.compute(0)
    pc.compute(0, False)
    actual_probs = np.array(pc.get_probabilities())
    expect_probs = np.array([1.0, 0.0])
    np.testing.assert_array_equal(actual_probs, expect_probs)

    e.compute(1)
    pc.compute(1, False)
    actual_probs = np.array(pc.get_probabilities())
    expect_probs = np.array([0.0, 1.0])
    np.testing.assert_array_equal(actual_probs, expect_probs)
Example #14
le = preprocessing.LabelEncoder()
le.fit(y_trains)
y_trains_ints = le.transform(y_trains)

# Setup blocks
st = ScalarTransformer(
    min_val=-1.0,  # minimum input value
    max_val=1.0,  # maximum input value
    num_s=1024,  # number of statelets
    num_as=128)  # number of active statelets

pp = PatternClassifier(
    num_l=2,  # number of labels
    num_s=512,  # number of statelets
    num_as=8,  # number of active statelets
    perm_thr=20,  # receptor permanence threshold
    perm_inc=2,  # receptor permanence increment
    perm_dec=1,  # receptor permanence decrement
    pct_pool=0.8,  # percent pooled
    pct_conn=0.5,  # percent initially connected
    pct_learn=0.3)  # percent learn

# Connect blocks
pp.input.add_child(st.output, 0)

# Fit
for i in range(len(x_trains)):
    st.set_value(x_trains[i])
    pp.set_label(y_trains_ints[i])
    st.feedforward()
    pp.feedforward(learn=True)
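
A hedged prediction pass mirroring the predict() method in Example #3; x_tests is an assumed name for the held-out values:

# Predict
probs = []
for i in range(len(x_tests)):
    st.set_value(x_tests[i])
    st.feedforward()
    pp.feedforward(learn=False)
    probs.append(pp.get_probabilities())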
Example #15
        num_as=8)  # number of active statelets

    sl = SequenceLearner(
        num_spc=10,  # number of statelets per column
        num_dps=10,  # number of coincidence detectors per statelet
        num_rpd=12,  # number of receptors per coincidence detector
        d_thresh=6,  # coincidence detector threshold
        perm_thr=1,  # receptor permanence threshold
        perm_inc=1,  # receptor permanence increment
        perm_dec=0)  # receptor permanence decrement

    pc = PatternClassifier(
        num_l=20,  # number of labels
        num_s=640,  # number of statelets
        num_as=8,  # number of active statelets
        perm_thr=20,  # receptor permanence threshold
        perm_inc=2,  # receptor permanence increment
        perm_dec=1,  # receptor permanence decrement
        pct_pool=0.8,  # pooling percentage
        pct_conn=0.8,  # initially connected percentage
        pct_learn=0.25)  # learn percentage 0.25

    sl.input.add_child(st.output)
    pc.input.add_child(sl.output)

    aed = AbnormalEventDetector(5, 5)

    print('val  scr  lbl  prob  ae  output_active_statelets')

    for i in range(len(values)):

        st.set_value(values[i])
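
The snippet ends immediately after set_value(). Assuming SequenceLearner and PatternClassifier follow the same feedforward interface as Example #14, each iteration presumably continues along these lines; step_labels is a hypothetical per-step label source:

        st.feedforward()
        sl.feedforward(learn=True)
        pc.set_label(step_labels[i])  # assumed per-step label source
        pc.feedforward(learn=True)
        probs = pc.get_probabilities()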