Example 1
    def infer(self, evidence, is_reset=False):
        """Compute output vector p(evidence|C). Gaussian, dot and product
        inference algorithms are set in the constructor.

        Args:
            evidence: a vector

        Returns:
            A vector representing the activations of coincidences given
            the evidence. The vector is normalized to sum to 1.
        """
        if is_reset:
            return
        # TODO: also gather coincidence statistics during temporal learning?
        y = np.ones(len(self.coincidences))

        if self.algorithm == "gaussian":
            # computing distances between evidence and all coincidences
            # TODO refactor using self._distance and np.vectorize
            dist = (self.coincidences_matrix - evidence) ** 2
            dist = np.sum(dist, axis=1) ** 0.5
            y = self.vectorized_gaussian(dist)

            # sparsifying the output (kept disabled):
            # num_max_values = int(len(y) * .05)  # 5%
            # z = y.argsort()[-3:]  # indices of the 3 largest values
            # closest_id = np.argmax(y)
            # mask = np.zeros(len(y))
            # for i in z:
            #     mask[i] = 1
            # y = y * mask

        if self.algorithm == "dot":
            for i, c in enumerate(self.coincidences.values()):
                y[i] = np.dot(evidence, c)

        # Equation A.2 from George's thesis: it assumes that the evidence
        # from the children can be combined independently given the node's
        # coincidences.
        #
        # Example:
        #   a node has two children, each with three temporal groups; the
        #   concatenated input from those children is
        #   i = [0.8, 0.1, 0.1, 0.5, 0.2, 0.3], i.e. the input from the 1st
        #   child is [0.8, 0.1, 0.1] and from the 2nd is [0.5, 0.2, 0.3].
        #   For a stored coincidence [0, 2] (group 0 of the 1st child,
        #   group 2 of the 2nd), the computation is 0.8 * 0.3 = 0.24.

        if self.algorithm == "product":
            for i, c in enumerate(self.coincidences.values()):
                y[i] = self._product_inference(c, evidence)

        if self.algorithm == "sum":
            for i, c in enumerate(self.coincidences.values()):
                y[i] = self._sum_inference(c, evidence)

        return utils.normalize_to_one(y)
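
A minimal sketch of what the "product" branch could compute, assuming a
hypothetical _product_inference helper in which a stored coincidence holds
one temporal-group index per child, and child_sizes (an assumed parameter,
not in the original) gives the width of each child's segment in the
concatenated evidence vector:

    import numpy as np

    def _product_inference(coincidence, evidence, child_sizes=(3, 3)):
        # Hypothetical reconstruction of the Equation A.2 comment above:
        # multiply, across children, the evidence element selected by
        # that child's entry in the coincidence.
        result, offset = 1.0, 0
        for child_index, size in enumerate(child_sizes):
            segment = evidence[offset:offset + size]
            result *= segment[coincidence[child_index]]
            offset += size
        return result

    evidence = np.array([0.8, 0.1, 0.1, 0.5, 0.2, 0.3])
    print(_product_inference([0, 2], evidence))  # 0.8 * 0.3 = 0.24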
Example 2
    def testNormalizeToOne(self):
        data = ((1, 0, 0.0), (0.5, 4, 3), (0.1, 0.1, 0.1))
        data = np.asarray(data)

        for x in data:
            output = utils.normalize_to_one(x)
            # assertAlmostEqual: after the floating-point division the
            # sum can differ from 1 by an ulp
            self.assertAlmostEqual(output.sum(), 1)

        bad_data = ((0, 0, 0), (2, 0, -2))
        bad_data = np.asarray(bad_data)

        for x in bad_data:
            self.assertRaises(ValueError, utils.normalize_to_one, x)
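
The test pins down the contract of utils.normalize_to_one: a vector with a
positive sum is scaled to sum to 1, and a vector whose sum is not positive
raises ValueError (note that both bad_data rows sum to zero). A minimal
sketch consistent with that contract; the project's real implementation may
differ:

    import numpy as np

    def normalize_to_one(x):
        # Scale x so its elements sum to 1; reject vectors whose sum is
        # zero or negative, matching the bad_data cases above.
        total = np.sum(x)
        if total <= 0:
            raise ValueError("vector sum must be positive")
        return x / total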
Example 3
    def infer(self, y, sparsify=False):
        """Compute the activations of temporal groups given the activations
        of coincidences from the temporal pooler.
        """
        if self.algorithm == 'maxProp':
            out = np.max(self.PCG * y, axis=1)  # max for each group (row)
        elif self.algorithm == 'sumProp':
            out = np.dot(self.PCG, y)  # sum for each group (row)
        elif self.algorithm == 'tbi':
            # TODO time-based inference
            raise NotImplementedError("time-based inference is not implemented")
        else:
            # fail loudly instead of hitting a NameError on 'out' below
            raise ValueError("unknown algorithm: %s" % self.algorithm)

        if sparsify:
            # zero-out all but the maximal value
            max_index = np.argmax(out)
            out *= 0
            out[max_index] = 1

        return utils.normalize_to_one(out)
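
A worked example of the two propagation modes, with illustrative values:
rows of PCG correspond to temporal groups, columns to coincidences, and y
holds the coincidence activations produced by the previous inference step:

    import numpy as np

    PCG = np.array([[0.7, 0.3, 0.0],
                    [0.0, 0.4, 0.6]])
    y = np.array([0.5, 0.3, 0.2])

    print(np.max(PCG * y, axis=1))  # maxProp: ~[0.35 0.12]
    print(np.dot(PCG, y))           # sumProp: ~[0.44 0.24]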