Example #1
    def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
        """Initializes an object of type 'OneDimensionalHitParade'.
        
        :param n: Number of maxima and minima to remember.
        :type n: int
        
        :param d: Minimum gap between two hits.
        :type d: int
        
        :param real_dtype: Datatype of sequence items
        :type real_dtype: numpy.dtype or str
        
        :param integer_dtype: Datatype of sequence indices
        :type integer_dtype: numpy.dtype or str
        """
        self.n = int(n)
        self.d = int(d)
        self.iM = numx.zeros((n, ), dtype=integer_dtype)
        self.im = numx.zeros((n, ), dtype=integer_dtype)

        real_dtype = numx.dtype(real_dtype)
        if real_dtype in mdp.utils.get_dtypes('AllInteger'):
            max_num = numx.iinfo(real_dtype).max
            min_num = numx.iinfo(real_dtype).min
        else:
            max_num = numx.finfo(real_dtype).max
            min_num = numx.finfo(real_dtype).min
        self.M = numx.array([min_num] * n, dtype=real_dtype)
        self.m = numx.array([max_num] * n, dtype=real_dtype)

        self.lM = 0
        self.lm = 0
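The pattern above seeds the stored maxima with the smallest representable value of the dtype and the minima with the largest, so the first real sample always replaces them. A minimal standalone sketch of that sentinel initialization in plain numpy (numx is MDP's alias for numpy; the helper name here is illustrative):

import numpy as np

def sentinel_bounds(dtype):
    # Smallest and largest representable values for integer or float dtypes.
    dtype = np.dtype(dtype)
    info = np.iinfo(dtype) if np.issubdtype(dtype, np.integer) else np.finfo(dtype)
    return info.min, info.max

min_num, max_num = sentinel_bounds("d")
M = np.full(3, min_num)  # running maxima: any sample exceeds them
m = np.full(3, max_num)  # running minima: any sample undercuts them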
Example #2
 def test_incompatible_arrays(self):
     """Test with incompatible arrays."""
     rescont = MessageResultContainer()
     msgs = [{"a":  np.zeros((10,3))}, {"a":  np.zeros((10,4))}]
     for msg in msgs:
         rescont.add_message(msg)
     pytest.raises(ValueError, rescont.get_message)
Example #3
    def __init__(self, n, d, real_dtype="d", integer_dtype="l"):
        """
        Input arguments:
        n -- Number of maxima and minima to remember
        d -- Minimum gap between two hits

        real_dtype -- dtype of sequence items
        integer_dtype -- dtype of sequence indices
        Note: be careful with dtypes!
        """
        self.n = int(n)
        self.d = int(d)
        self.iM = numx.zeros((n, ), dtype=integer_dtype)
        self.im = numx.zeros((n, ), dtype=integer_dtype)
        
        real_dtype = numx.dtype(real_dtype)
        if real_dtype in mdp.utils.get_dtypes('AllInteger'):
            max_num = numx.iinfo(real_dtype).max
            min_num = numx.iinfo(real_dtype).min
        else:
            max_num = numx.finfo(real_dtype).max
            min_num = numx.finfo(real_dtype).min
        self.M = numx.array([min_num]*n, dtype=real_dtype)
        self.m = numx.array([max_num]*n, dtype=real_dtype)
        
        self.lM = 0
        self.lm = 0
Example #4
 def _init_internals(self):
     input_dim = self.input_dim
     self._mean = numx.zeros((input_dim,), dtype='d')
     self._var = numx.zeros((input_dim,), dtype='d')
     self._tlen = 0
     self._diff2 = numx.zeros((input_dim,), dtype='d')
     self._initialized = 1
Example #5
 def test_mixed_dict(self):
     """Test msg being a dict containing an array."""
     rescont = MessageResultContainer()
     msg1 = {
         "f": 2,
         "a": np.zeros((10, 3), 'int'),
         "b": "aaa",
         "c": 1,
     }
     msg2 = {
         "a": np.ones((15, 3), 'int'),
         "b": "bbb",
         "c": 3,
         "d": 1,
     }
     rescont.add_message(msg1)
     rescont.add_message(msg2)
     combined_msg = rescont.get_message()
     a = np.zeros((25, 3), 'int')
     a[10:] = 1
     reference_msg = {"a": a, "c": 4, "b": "aaabbb", "d": 1, "f": 2}
     assert np.all(combined_msg["a"] == reference_msg["a"])
     combined_msg.pop("a")
     reference_msg.pop("a")
     assert combined_msg == reference_msg
Example #6
    def _execute(self, x):
        degree = self._degree
        dim = self.input_dim
        n = x.shape[1]

        # preallocate memory
        dexp = numx.zeros((self.output_dim, x.shape[0]), dtype=self.dtype)
        # copy monomials of degree 1
        dexp[0:n, :] = x.T

        k = n
        prec_end = 0
        next_lens = numx.ones((dim+1, ), dtype=int)
        next_lens[0] = 0
        for i in range(2, degree+1):
            prec_start = prec_end
            prec_end += nmonomials(i-1, dim)
            prec = dexp[prec_start:prec_end, :]

            lens = next_lens[:-1].cumsum(axis=0)
            next_lens = numx.zeros((dim+1, ), dtype=int)
            for j in range(dim):
                factor = prec[lens[j]:, :]
                len_ = factor.shape[0]
                dexp[k:k+len_, :] = x[:, j] * factor
                next_lens[j+1] = len_
                k = k+len_

        return dexp.T
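The preallocation above relies on nmonomials(i, dim), which is not shown in this excerpt. Assuming it counts the distinct monomials of exact degree i in dim variables, a standard combinatorial sketch would be:

from math import comb

def nmonomials(degree, nvariables):
    # Number of distinct monomials of exact degree `degree` in
    # `nvariables` variables: C(degree + nvariables - 1, degree).
    return comb(degree + nvariables - 1, degree)

# An expansion of degree 3 in 2 variables produces
# x, y, x^2, xy, y^2, x^3, x^2*y, x*y^2, y^3 -> 9 monomials.
print(sum(nmonomials(i, 2) for i in range(1, 4)))  # 9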
Example #7
    def _train(self, x, y):
        """
        :param x: Array of different input observations.
        :type x: numpy.ndarray

        :param y: Array of size (x.shape[0], output_dim) that contains the 
            observed output to the input x's.
        :type y: numpy.ndarray
        """
        # initialize internal vars if necessary
        if self._xTx is None:
            if self.with_bias:
                x_size = self._input_dim + 1
            else:
                x_size = self._input_dim
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        if self.with_bias:
            x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
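The accumulators xTx and xTy implement chunk-wise linear regression: after training, the normal equations can be solved from these sums alone. A toy sketch of the same scheme in plain numpy (all names and constants here are illustrative):

import numpy as np

rng = np.random.default_rng(0)
true_w = np.array([[1.0], [-2.0], [0.5]])
xTx = np.zeros((3, 3))
xTy = np.zeros((3, 1))
for _ in range(5):                 # five training chunks
    x = rng.normal(size=(100, 3))
    y = x @ true_w
    xTx += x.T @ x                 # same update as _train above
    xTy += x.T @ y
w = np.linalg.solve(xTx, xTy)      # recovers true_w up to rounding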
Example #8
 def output_sizes(self, n):
     """Return the individual output sizes of each expansion function
     when the input has length n."""
     sizes = numx.zeros(len(self.funcs), dtype=int)
     x = numx.zeros((1,n))
     for i, func in enumerate(self.funcs):
         outx = func(x)
         sizes[i] = outx.shape[1]
     return sizes
Example #9
    def _train(self, x, y):
        # initialize internal vars if necessary
        if self._xTx is None:
            x_size = self._input_dim + 1
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
Example #10
    def _execute(self, x):
        assert x.shape[0] == 1

        if self.sliding_wnd is None:
            self._init_sliding_window()

        gap = self.gap
        rows = self.sliding_wnd.shape[0]
        cols = self.output_dim
        n = self.input_dim

        new_row = numx.zeros(cols, dtype=self.dtype)
        new_row[:n] = x

        # Slide
        if self.slide:
            self.sliding_wnd[:-1, :] = self.sliding_wnd[1:, :]

        # Delay
        if self.cur_idx-gap >= 0:
            new_row[n:] = self.sliding_wnd[self.cur_idx-gap, :-n]

        # Add new row to matrix
        self.sliding_wnd[self.cur_idx, :] = new_row

        if self.cur_idx < rows-1:
            self.cur_idx = self.cur_idx + 1
        else:
            self.slide = True

        return new_row[numx.newaxis, :]
Example #11
 def output_sizes(self, n):
     """Return the individual output sizes of each expansion function
     when the input has lenght n.
     
     :param n: Input dimension,
     :type: int
     
     :return: The individual output sizes of each expansion function.
     :rtype: list
     """
     sizes = numx.zeros(len(self.funcs), dtype=numx.int64)
     x = numx.zeros((1, n))
     for i, func in enumerate(self.funcs):
         outx = func(x)
         sizes[i] = outx.shape[1]
     return sizes
Example #12
    def train(self, func, x0):
        """Optimize parameters to minimze loss.

        Arguments:
            - func: A function of the parameters that returns a tuple with the gradient and the loss respectively.
            - x0: Parameters to use as starting point.

        Returns the parameters that minimize the loss.
        """
        if self.dparams is None:
            self.dparams = numx.zeros(x0.shape)

        updated_params = x0

        for _ in range(self.epochs):
            gradient, e = func(updated_params)
            self.dparams = self.momentum * self.dparams - self.learning_rate * gradient
            # TODO: how do we make sure that we do not decay the bias terms?
            updated_params += self.dparams - self.decay * updated_params
            self.learning_rate *= self.learning_rate_decay
            self._n_iter += 1
            if self.verbose_iter > 0:
                if e is None:
                    import warnings
                    warnings.warn('No error measure declared.')
                else:
                    self._error += e
                    if numx.mod(self._n_iter, self.verbose_iter) == 0:
                        print('Mean err. last', self.verbose_iter, 'of',
                              self._n_iter, ':', self._error / self.verbose_iter)
                        self._error = 0

        return updated_params
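The update above is classical momentum with weight decay and a decaying learning rate. A toy run on a one-dimensional quadratic loss shows the mechanics; every constant here is made up for illustration:

import numpy as np

momentum, lr, decay = 0.9, 0.1, 0.0
params = np.array([5.0])
dparams = np.zeros_like(params)
for _ in range(100):
    grad = params                      # gradient of 0.5 * p**2
    dparams = momentum * dparams - lr * grad
    params += dparams - decay * params
print(params)                          # near the minimum at 0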
Example #13
    def _sample_v(self, h, sample_l=False, concatenate=True):
        # returns  P(v=1|h,W,b), a sample from it, P(l=1|h,W,b),
        # and a sample from it

        ldim, vdim = self._labels_dim, self._visible_dim

        # activation
        a = self.bv + mult(h, self.w.T)
        av, al = a[:, :vdim], a[:, vdim:]

        # ## visible units: logistic activation
        probs_v = old_div(1., (1. + exp(-av)))
        v = (probs_v > random(probs_v.shape)).astype('d')

        # ## label units: softmax activation
        # subtract maximum to regularize exponent
        exponent = al - rrep(al.max(axis=1), ldim)
        probs_l = exp(exponent)
        probs_l /= rrep(probs_l.sum(axis=1), ldim)

        if sample_l:
            # ?? todo: I'm sure this can be optimized
            l = numx.zeros((h.shape[0], ldim))
            for t in range(h.shape[0]):
                l[t, :] = mdp.numx_rand.multinomial(1, probs_l[t, :])
        else:
            l = probs_l.copy()

        if concatenate:
            probs = numx.concatenate((probs_v, probs_l), axis=1)
            x = numx.concatenate((v, l), axis=1)
            return probs, x
        else:
            return probs_v, probs_l, v, l
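The label activation above is a numerically stable softmax: subtracting the per-row maximum before exponentiating prevents overflow without changing the result. A plain-numpy sketch of the same trick:

import numpy as np

def softmax_rows(a):
    e = np.exp(a - a.max(axis=1, keepdims=True))  # regularize exponent
    return e / e.sum(axis=1, keepdims=True)

print(softmax_rows(np.array([[1000.0, 1001.0, 1002.0]])))  # no overflow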
Example #14
 def _get_rnd_permutation(self, dim):
     # return a random permutation matrix with the right dimensions and type
     zero = numx.zeros((dim, dim), dtype=self.dtype)
     row = numx_rand.permutation(dim)
     for col in range(dim):
         zero[row[col], col] = 1.
     return zero
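Scattering ones at (permutation[col], col) yields a genuine permutation matrix, which is orthogonal by construction. A quick standalone check:

import numpy as np

rng = np.random.default_rng(0)
dim = 4
P = np.zeros((dim, dim))
P[rng.permutation(dim), np.arange(dim)] = 1.0
assert np.allclose(P.T @ P, np.eye(dim))  # permutation matrices are orthogonal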
Example #15
    def get_quadratic_form(self, nr):
        """Return the matrix H, the vector f and the constant c of the
        quadratic form 1/2 x'Hx + f'x + c that defines the output
        of the component 'nr' of the SFA node.

        :param nr: The component 'nr' of the SFA node.

        :returns: The matrix H, the vector f and the constant c of the
            quadratic form.
        :rtype: numpy.ndarray, numpy.ndarray, float
        """
        if self.sf is None:
            self._if_training_stop_training()

        sf = self.sf[:, nr]
        c = -mult(self.avg, sf)
        n = self.input_dim
        f = sf[:n]
        h = numx.zeros((n, n), dtype=self.dtype)
        k = n
        for i in range(n):
            for j in range(n):
                if j > i:
                    h[i, j] = sf[k]
                    k = k + 1
                elif j == i:
                    h[i, j] = 2 * sf[k]
                    k = k + 1
                else:
                    h[i, j] = h[j, i]

        return QuadraticForm(h, f, c, dtype=self.dtype)
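The nested loop above unpacks a coefficient vector laid out as [linear terms, upper-triangular quadratic terms in row order]; the diagonal entries are doubled so that 1/2 x'Hx reproduces the x_i^2 coefficients exactly. A compact restatement of the same indexing, with an arbitrary dummy coefficient vector:

import numpy as np

n = 3
sf = np.arange(1.0, 1 + n + n * (n + 1) // 2)  # dummy coefficients
f, h, k = sf[:n], np.zeros((n, n)), n
for i in range(n):
    for j in range(i, n):
        h[i, j] = 2 * sf[k] if i == j else sf[k]  # double the diagonal
        h[j, i] = h[i, j]                         # mirror to lower triangle
        k += 1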
Example #16
    def _execute(self, x):
        #----------------------------------------------------
        # similar algorithm to that within self.stop_training()
        #  refer there for notes & comments on code
        #----------------------------------------------------
        N = self.data.shape[0]
        Nx = x.shape[0]
        W = numx.zeros((Nx, N), dtype=self.dtype)

        k, r = self.k, self.r
        d_out = self.output_dim
        Q_diag_idx = numx.arange(k)

        for row in range(Nx):
            #find nearest neighbors of x in M
            M_xi = self.data - x[row]
            nbrs = numx.argsort((M_xi**2).sum(1))[:k]
            M_xi = M_xi[nbrs]

            #find corrected covariance matrix Q
            Q = mult(M_xi, M_xi.T)
            if r is None and k > d_out:
                sig2 = (svd(M_xi, compute_uv=0))**2
                r = numx.sum(sig2[d_out:])
                Q[Q_diag_idx, Q_diag_idx] += r
            elif r is not None:
                Q[Q_diag_idx, Q_diag_idx] += r

            #solve for weights
            w = self._refcast(numx_linalg.solve(Q, numx.ones(k)))
            w /= w.sum()
            W[row, nbrs] = w

        #multiply weights by result of SVD from training
        return numx.dot(W, self.training_projection)
Example #17
    def get_quadratic_form(self, nr):
        """
        Return the matrix H, the vector f and the constant c of the
        quadratic form 1/2 x'Hx + f'x + c that defines the output
        of the component 'nr' of the SFA node.
        """
        if self.sf is None:
            self._if_training_stop_training()

        sf = self.sf[:, nr]
        c = -mult(self.avg, sf)
        n = self.input_dim
        f = sf[:n]
        h = numx.zeros((n, n), dtype=self.dtype)
        k = n
        for i in range(n):
            for j in range(n):
                if j > i:
                    h[i, j] = sf[k]
                    k = k+1
                elif j == i:
                    h[i, j] = 2*sf[k]
                    k = k+1
                else:
                    h[i, j] = h[j, i]

        return QuadraticForm(h, f, c, dtype=self.dtype)
Example #18
    def __init__(self, nodes, dtype=None):
        """Setup the layer with the given list of nodes.

        The input and output dimensions for the nodes must be already set
        (the output dimensions for simplicity reasons). The training phases for
        the nodes are allowed to differ.

        Keyword arguments:
        nodes -- List of the nodes to be used.
        """
        self.nodes = nodes
        # check nodes properties and get the dtype
        dtype = self._check_props(dtype)
        # calculate the dimensions
        self.node_input_dims = numx.zeros(len(self.nodes))
        input_dim = 0
        for index, node in enumerate(nodes):
            input_dim += node.input_dim
            self.node_input_dims[index] = node.input_dim
        output_dim = self._get_output_dim_from_nodes()

        # set layer state
        nodes_is_training = [node.is_training() for node in nodes]
        if mdp.numx.any(nodes_is_training):
            self._is_trainable = True
            self._training = True
        else:
            self._is_trainable = False
            self._training = False

        super(Layer, self).__init__(input_dim=input_dim,
                                    output_dim=output_dim,
                                    dtype=dtype)
Example #19
 def _update_mean(self, x, label):
     """Update the mean with data for a single label."""
     if label not in self.label_means:
         self.label_means[label] = numx.zeros(self.input_dim)
         self.n_label_samples[label] = 0
     # TODO: use smarter summing to avoid rounding errors
     self.label_means[label] += numx.sum(x, axis=0)
     self.n_label_samples[label] += len(x)
Example #20
 def _get_contrast(self, covs, bica_bsfa=None):
     if bica_bsfa is None:
         bica_bsfa = self._bica_bsfa
     # return current value of the contrast
     R = self.output_dim
     ncovs = covs.ncovs
     covs = covs.covs
     icaweights = self.icaweights
     sfaweights = self.sfaweights
     # unpack the bsfa and bica coefficients
     bica, bsfa = bica_bsfa
     sfa = numx.zeros((ncovs, ), dtype=self.dtype)
     ica = numx.zeros((ncovs, ), dtype=self.dtype)
     for t in range(ncovs):
         sq_corr = covs[:R, :R, t] * covs[:R, :R, t]
         sfa[t] = sq_corr.trace()
         ica[t] = 2*_triu(sq_corr, 1).ravel().sum()
     return (bsfa*sfaweights*sfa).sum(), (bica*icaweights*ica).sum()
Example #21
 def _execute(self, x):
     gap = self.gap
     tf = x.shape[0] - (self.time_frames-1)*gap
     rows = self.input_dim
     cols = self.output_dim
     y = numx.zeros((tf, cols), dtype=self.dtype)
     for frame in range(self.time_frames):
         y[:, frame*rows:(frame+1)*rows] = x[gap*frame:gap*frame+tf, :]
     return y
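The loop above stacks gap-shifted copies of the signal side by side (a time-delay embedding). A self-contained version of the same construction, with hypothetical argument names:

import numpy as np

def time_frames(x, n_frames, gap):
    tf = x.shape[0] - (n_frames - 1) * gap
    n = x.shape[1]
    y = np.zeros((tf, n_frames * n), dtype=x.dtype)
    for frame in range(n_frames):
        y[:, frame * n:(frame + 1) * n] = x[gap * frame:gap * frame + tf, :]
    return y

x = np.arange(12.0).reshape(6, 2)
print(time_frames(x, n_frames=2, gap=1).shape)  # (5, 4)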
Example #22
 def test_execute_routing(self):
     """Test the standard routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "list": [1,2],
         "data": x.copy(),  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
     }
     y, out_msg = sboard.execute(x, msg)
     reference_y = n.array([[3,1,2],[6,4,5]])
     assert (y == reference_y).all()
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert n.all(out_msg["data"] == reference_y)
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #23
 def _execute(self, x):
     y = numx.zeros((x.shape[0], self._output_dim), dtype=self.dtype)
     c, s = self._centers, self._sizes
     for i in range(self._output_dim):
         dist = x - c[i, :]
         if self._isotropic:
             tmp = (dist**2.).sum(axis=1) / s[i]
         else:
             tmp = (dist*matmult(dist, s[i, :, :])).sum(axis=1)
         y[:, i] = numx.exp(-0.5*tmp)
     return y
Example #24
def _mgs(a):
    m, n = a.shape
    v = a.copy()
    r = numx.zeros((n, n))
    for i in range(n):
        r[i, i] = numx_linalg.norm(v[:, i])
        v[:, i] = v[:, i]/r[i, i]
        for j in range(i+1, n):
            r[i, j] = mult(v[:, i], v[:, j])
            v[:, j] = v[:, j] - r[i, j]*v[:, i]
    # q is v
    return v, r
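A quick way to validate this modified Gram-Schmidt routine: the returned v should have orthonormal columns and v @ r should reconstruct the input. A standalone check in plain numpy:

import numpy as np

def mgs(a):
    n = a.shape[1]
    v, r = a.copy(), np.zeros((n, n))
    for i in range(n):
        r[i, i] = np.linalg.norm(v[:, i])
        v[:, i] = v[:, i] / r[i, i]
        for j in range(i + 1, n):
            r[i, j] = v[:, i] @ v[:, j]
            v[:, j] = v[:, j] - r[i, j] * v[:, i]
    return v, r

a = np.random.default_rng(0).normal(size=(5, 3))
q, r = mgs(a)
assert np.allclose(q @ r, a) and np.allclose(q.T @ q, np.eye(3))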
Example #25
    def _execute(self, x):
        gap = self.gap
        rows = x.shape[0]
        cols = self.output_dim
        n = self.input_dim

        y = numx.zeros((rows, cols), dtype=self.dtype)

        for frame in range(self.time_frames):
            y[gap*frame:, frame*n:(frame+1)*n] = x[:rows-gap*frame, :]

        return y
Example #26
 def _label(self, x, threshold=0):
     """Retrieves patterns from the associative memory.
     
     :param x: A matrix having different variables on different columns
         and observations on rows.
     :type x: numpy.ndarray
     :param threshold: The threshold applied to each input component.
     :type threshold: float or numpy.ndarray
     :return: The retrieved patterns.
     """
     # todo: consider iterables
     threshold = numx.zeros(self.input_dim) + threshold
     return numx.array(
         [self._label_one(pattern, threshold) for pattern in x])
Example #27
 def test_inverse_message_routing(self):
     """Test the inverse routing for messages."""
     sboard = BiSwitchboard(input_dim=3, connections=[2,0,1])
     x = n.array([[1,2,3],[4,5,6]])
     msg = {
         "string": "blabla",
         "method": "inverse",
         "list": [1,2],
         "data": x,  # should be mapped by switchboard
         "data2": n.zeros(3),  # should not be modified
         "data3": n.zeros((3,4)),  # should not be modified
         "target": "test"
     }
     y, out_msg, target = sboard.execute(None, msg)
     assert y is None
     assert target == "test"
     reference_y = n.array([[2,3,1],[5,6,4]])
     assert out_msg["string"] == msg["string"]
     assert out_msg["list"] == msg["list"]
     assert (out_msg["data"] == reference_y).all()
     assert out_msg["data2"].shape == (3,)
     assert out_msg["data3"].shape == (3,4)
Example #28
def _mgs(a):
    """Modified Gram-Schmidt."""
    m, n = a.shape
    v = a.copy()
    r = numx.zeros((n, n))
    for i in range(n):
        r[i, i] = numx_linalg.norm(v[:, i])
        v[:, i] = old_div(v[:, i], r[i, i])
        for j in range(i+1, n):
            r[i, j] = mult(v[:, i], v[:, j])
            v[:, j] = v[:, j] - r[i, j]*v[:, i]
    # q is v
    return v, r
Example #29
 def _update_mean(self, x, label):
     """Update the mean with data for a single label.
     
     :param x: The data.
     :type x: numpy.ndarray
     :param label: The label index.
     """
     if label not in self.label_means:
         self.label_means[label] = numx.zeros(self.input_dim)
         self.n_label_samples[label] = 0
     # TODO: use smarter summing to avoid rounding errors
     self.label_means[label] += numx.sum(x, axis=0)
     self.n_label_samples[label] += len(x)
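Keeping a per-label sum and sample count, as above, lets the mean be formed lazily by a single division. A toy sketch of that running-mean bookkeeping:

import numpy as np

sums, counts = {}, {}
for x, label in [(np.ones((2, 3)), "a"), (np.zeros((4, 3)), "a")]:
    sums.setdefault(label, np.zeros(x.shape[1]))
    counts[label] = counts.get(label, 0) + len(x)
    sums[label] += x.sum(axis=0)
print(sums["a"] / counts["a"])  # -> [1/3 1/3 1/3]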
Example #30
    def _execute(self, x):
        if self.input_dim is None:
            self.set_input_dim(x.shape[1])

        num_samples = x.shape[0]
        sizes = self.output_sizes(self.input_dim)

        out = numx.zeros((num_samples, self.output_dim), dtype=self.dtype)

        current_pos = 0
        for i, func in enumerate(self.funcs):
            out[:, current_pos:current_pos+sizes[i]] = func(x)
            current_pos += sizes[i]
        return out
Example #31
    def class_probabilities(self, x):
        """Return the posterior probability of each class given the input."""
        self._pre_execution_checks(x)

        # compute the probability for each class
        tmp_prob = numx.zeros((x.shape[0], len(self.labels)), dtype=self.dtype)
        for i in range(len(self.labels)):
            tmp_prob[:, i] = self._gaussian_prob(x, i)
            tmp_prob[:, i] *= self.p[i]

        # normalize to probability 1
        # (not necessary, but sometimes useful)
        tmp_tot = tmp_prob.sum(axis=1)
        tmp_tot = tmp_tot[:, numx.newaxis]
        return tmp_prob / tmp_tot
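The normalization step is Bayes' rule with the evidence computed as a row sum. A toy two-class sketch with made-up likelihoods and priors:

import numpy as np

likelihood = np.array([[0.20, 0.10],   # p(x | class) for two samples
                       [0.05, 0.40]])
prior = np.array([0.7, 0.3])           # p(class)
joint = likelihood * prior
posterior = joint / joint.sum(axis=1)[:, np.newaxis]
print(posterior.sum(axis=1))           # each row sums to 1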
Example #32
    def _train(self, x, y):
        """
        **Additional input arguments**

        y
          array of size (x.shape[0], output_dim) that contains the observed
          output to the input x's.
        """
        # initialize internal vars if necessary
        if self._xTx is None:
            if self.with_bias:
                x_size = self._input_dim + 1
            else:
                x_size = self._input_dim
            self._xTx = numx.zeros((x_size, x_size), self._dtype)
            self._xTy = numx.zeros((x_size, self._output_dim), self._dtype)

        if self.with_bias:
            x = self._add_constant(x)

        # update internal variables
        self._xTx += mult(x.T, x)
        self._xTy += mult(x.T, y)
        self._tlen += x.shape[0]
Example #33
 def _execute(self, x, *args, **kwargs):
     """Process the data through the internal nodes."""
     out_start = 0
     out_stop = 0
     y = None
     for node in self.nodes:
         out_start = out_stop
         out_stop += node.output_dim
         if y is None:
             node_y = node.execute(x, *args, **kwargs)
             y = numx.zeros([node_y.shape[0], self.output_dim],
                            dtype=node_y.dtype)
             y[:, out_start:out_stop] = node_y
         else:
             y[:, out_start:out_stop] = node.execute(x, *args, **kwargs)
     return y
Example #34
def test_NormalizingRecursiveExpansionNode():
    """Essentially testing the domain transformation."""
    degree = 10
    episodes = 5
    num_obs = 500
    num_vars = 4

    for func_name in recfs:
        x = np.zeros((0, num_vars))
        expn = NormalizingRecursiveExpansionNode(degree, recf=func_name,
                                                 check=True, with0=True)
        for i in range(episodes):
            chunk = (np.random.rand(num_obs, num_vars)-0.5)*1000
            expn.train(chunk)
            x = np.concatenate((x, chunk), axis=0)
        expn.stop_training()
        expn.execute(x)
Example #35
    def get_minima(self):
        """
        Return the tuple (minima, indices).
        Minima are sorted in ascending order.

        If the training phase has not been completed yet, call
        stop_training.
        """
        self._if_training_stop_training()
        cols = self.input_dim
        n = self.n
        hit = self.hit
        im = numx.zeros((n, cols), dtype=self.itype)
        m = numx.ones((n, cols), dtype=self.dtype)
        for c in range(cols):
            m[:, c], im[:, c] = hit[c].get_minima()
        return m, im
Example #36
def get_handcomputed_function_tensor(x, func, degree):
    """x must be of shape (4,)."""
    outtensor = np.zeros((degree+1,)*4)

    outtensor[:, 0, 0, 0] = func(x[np.newaxis, 0], degree)
    outtensor[0, :, 0, 0] = func(x[np.newaxis, 1], degree)
    outtensor[0, 0, :, 0] = func(x[np.newaxis, 2], degree)
    outtensor[0, 0, 0, :] = func(x[np.newaxis, 3], degree)
    for i in range(degree+1):
        outtensor[:, i, 0, 0] = outtensor[:, 0, 0, 0]*outtensor[0, i, 0, 0]

    for i in range(degree+1):
        outtensor[:, :, i, 0] = outtensor[:, :, 0, 0] * outtensor[0, 0, i, 0]

    for i in range(degree+1):
        outtensor[:, :, :, i] = outtensor[:, :, :, 0] * outtensor[0, 0, 0, i]

    return outtensor