Example #1
    def _execute(self, x):
        """Expand the input with one Gaussian RBF per center.

        For each center i the output column is exp(-0.5 * d_i), where d_i
        is either an isotropic squared distance scaled by the scalar size
        s[i], or the quadratic form dist^T s[i] dist for a full matrix size.
        """
        y = numx.zeros((x.shape[0], self._output_dim), dtype=self.dtype)
        c, s = self._centers, self._sizes
        for i in range(self._output_dim):
            dist = x - c[i, :]
            if self._isotropic:
                # scalar size: isotropic squared distance
                tmp = (dist**2.).sum(axis=1) / s[i]
            else:
                # matrix size: per-center quadratic form
                tmp = (dist*matmult(dist, s[i, :, :])).sum(axis=1)
            y[:, i] = numx.exp(-0.5*tmp)
        return y
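To see what the isotropic branch of _execute produces, here is a minimal standalone sketch in plain NumPy. The names rbf_expand, centers and sizes are illustrative assumptions, not part of the snippet above: each output column is a Gaussian bump exp(-0.5 * ||x - c_i||^2 / s_i) around one center.

import numpy as np

def rbf_expand(x, centers, sizes):
    # x: (n_samples, dim), centers: (k, dim), sizes: (k,) isotropic widths
    y = np.zeros((x.shape[0], len(centers)))
    for i, (c, s) in enumerate(zip(centers, sizes)):
        dist = x - c                                  # broadcast over samples
        y[:, i] = np.exp(-0.5 * (dist**2).sum(axis=1) / s)
    return y

x = np.random.randn(5, 3)
centers = np.random.randn(4, 3)
sizes = np.ones(4)
print(rbf_expand(x, centers, sizes).shape)            # (5, 4)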
Example #2
    def _gaussian_prob(self, x, lbl_idx):
        """Return the probability of the data points x under the Gaussian
        associated with label index lbl_idx.

        Input arguments:
        x -- Input data
        lbl_idx -- Index of the label whose stored mean and covariance are used
        """
        x = self._refcast(x)

        dim = self.input_dim
        sqrt_detS = self._sqrt_def_covs[lbl_idx]
        invS = self.inv_covs[lbl_idx]
        # subtract the mean
        x_mn = x - self.means[lbl_idx][numx.newaxis, :]
        # exponent
        exponent = -0.5 * (utils.mult(x_mn, invS) * x_mn).sum(axis=1)
        # constant
        constant = (2. * numx.pi)**(-dim / 2.) / sqrt_detS
        # probability
        return constant * numx.exp(exponent)
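The snippet evaluates the standard multivariate normal density (2*pi)^(-d/2) * det(S)^(-1/2) * exp(-0.5 * (x - m)^T S^(-1) (x - m)) for every row of x, using a precomputed inverse covariance and square-root determinant. A minimal standalone sketch, with the helper name gaussian_prob assumed here for illustration, reproduces the same computation and checks it against scipy.stats.multivariate_normal.pdf:

import numpy as np
from scipy.stats import multivariate_normal

def gaussian_prob(x, mean, cov):
    # density of each row of x under N(mean, cov), as in the snippet above
    dim = x.shape[1]
    inv_cov = np.linalg.inv(cov)
    sqrt_det = np.sqrt(np.linalg.det(cov))
    x_mn = x - mean[np.newaxis, :]
    exponent = -0.5 * (x_mn @ inv_cov * x_mn).sum(axis=1)
    constant = (2.0 * np.pi) ** (-dim / 2.0) / sqrt_det
    return constant * np.exp(exponent)

x = np.random.randn(10, 3)
mean = np.zeros(3)
cov = np.eye(3)
assert np.allclose(gaussian_prob(x, mean, cov),
                   multivariate_normal.pdf(x, mean=mean, cov=cov))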
Example #3
    def _gaussian_prob(self, x, lbl_idx):
        """Return the probability of the data points x under the Gaussian
        associated with label index lbl_idx.

        Input arguments:
        x -- Input data
        lbl_idx -- Index of the label whose stored mean and covariance are used
        """
        x = self._refcast(x)

        dim = self.input_dim
        sqrt_detS = self._sqrt_def_covs[lbl_idx]
        invS = self.inv_covs[lbl_idx]
        # subtract the mean
        x_mn = x - self.means[lbl_idx][numx.newaxis, :]
        # exponent
        exponent = -0.5 * (utils.mult(x_mn, invS) * x_mn).sum(axis=1)
        # constant
        constant = (2.0 * numx.pi) ** (-dim / 2.0) / sqrt_detS
        # probability
        return constant * numx.exp(exponent)
Example #4
    def _gaussian_prob(self, x, lbl_idx):
        """Return the probability of the data points x with respect to a
        Gaussian.

        :param x: Input data.
        :type x: numpy.ndarray
        :param lbl_idx: Index of the label whose stored mean and covariance
            are used.
        :return: The probability of each data point in x with respect to the
            Gaussian.
        :rtype: numpy.ndarray
        """
        x = self._refcast(x)

        dim = self.input_dim
        sqrt_detS = self._sqrt_def_covs[lbl_idx]
        invS = self.inv_covs[lbl_idx]
        # subtract the mean
        x_mn = x - self.means[lbl_idx][numx.newaxis, :]
        # exponent
        exponent = -0.5 * (utils.mult(x_mn, invS) * x_mn).sum(axis=1)
        # constant
        constant = old_div((2. * numx.pi)**(old_div(-dim, 2.)), sqrt_detS)
        # probability
        return constant * numx.exp(exponent)
    def _train(self, input):
        g = self.graph

        if len(g.nodes) == 0:
            # if missing, generate num_nodes initial nodes at random
            # assuming that the input data has zero mean and unit variance,
            # choose the random position according to a gaussian distribution
            # with zero mean and unit variance
            normal = numx_rand.normal
            for node_ind in range(self.num_nodes):
                self._add_node(self._refcast(normal(0.0, 1.0, self.input_dim)))

        epoch = self.epoch
        e_i = self.epsilon_i
        e_f = self.epsilon_f
        l_i = self.lambda_i
        l_f = self.lambda_f
        T_i = float(self.max_age_i)
        T_f = float(self.max_age_f)
        max_epochs = float(self.max_epochs)
        remaining_epochs = self.n_epochs_to_train
        while remaining_epochs > 0:
            # reset permutation of data points
            di = numx.random.permutation(input)
            if epoch < max_epochs:
                denom = epoch/max_epochs
            else:
                denom = 1.
            epsilon = e_i * ((e_f/e_i)**denom)
            lmbda = l_i * ((l_f/l_i)**denom)
            T = T_i * ((T_f/T_i)**denom)
            epoch += 1
            for x in di:
                # Step 1 rank nodes according to their distance to random point
                ranked_nodes = self._rank_nodes_by_distance(x)

                # Step 2 move nodes
                for rank, node in enumerate(ranked_nodes):
                    #TODO: cut off at some rank when using many nodes
                    #TODO: check speedup by vectorizing
                    delta_w = epsilon * numx.exp(-rank / lmbda) * \
                                    (x - node.data.pos)
                    node.data.pos += delta_w

                # Step 3 update edge weight
                for e in g.edges:
                    e.data.inc_age()

                # Step 4 set age of edge between first two nodes to zero
                #  or create it if it doesn't exist.
                n0 = ranked_nodes[0]
                n1 = ranked_nodes[1]
                nn = n0.neighbors()
                if n1 in nn:
                    edges = n0.get_edges(neighbor=n1)
                    edges[0].data.age = 0  # should only be one edge
                else:
                    self._add_edge(n0, n1)

                # step 5 delete edges with age > max_age
                self._remove_old_edges(max_age=T)
            remaining_epochs -= 1
        self.epoch = epoch
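Steps 1 and 2 of the loop above are the rank-based neural-gas update: every node is pulled towards the sample x with a strength that decays exponentially with the node's distance rank. A minimal vectorized sketch of that update, where the helper name neural_gas_step and the flat weight matrix are illustrative assumptions rather than part of the class above:

import numpy as np

def neural_gas_step(weights, x, epsilon, lmbda):
    # rank nodes by distance to x, then move each node towards x with a
    # step size epsilon * exp(-rank / lmbda)
    dists = np.linalg.norm(weights - x, axis=1)
    ranks = np.argsort(np.argsort(dists))          # 0 = closest node
    factors = epsilon * np.exp(-ranks / lmbda)
    return weights + factors[:, np.newaxis] * (x - weights)

weights = np.random.randn(6, 2)
x = np.array([1.0, -0.5])
weights = neural_gas_step(weights, x, epsilon=0.3, lmbda=2.0)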
    def _train(self, input):
        g = self.graph

        if len(g.nodes) == 0:
            # if missing, generate num_nodes initial nodes at random
            # assuming that the input data has zero mean and unit variance,
            # choose the random position according to a gaussian distribution
            # with zero mean and unit variance
            normal = numx_rand.normal
            for _ in range(self.num_nodes):
                self._add_node(self._refcast(normal(0.0, 1.0, self.input_dim)))

        epoch = self.epoch
        e_i = self.epsilon_i
        e_f = self.epsilon_f
        l_i = self.lambda_i
        l_f = self.lambda_f
        T_i = float(self.max_age_i)
        T_f = float(self.max_age_f)
        max_epochs = float(self.max_epochs)
        remaining_epochs = self.n_epochs_to_train
        while remaining_epochs > 0:
            # reset permutation of data points
            di = numx.random.permutation(input)
            if epoch < max_epochs:
                denom = old_div(epoch, max_epochs)
            else:
                denom = 1.
            epsilon = e_i * ((old_div(e_f, e_i))**denom)
            lmbda = l_i * ((old_div(l_f, l_i))**denom)
            T = T_i * ((old_div(T_f, T_i))**denom)
            epoch += 1
            for x in di:
                # Step 1 rank nodes according to their distance to random point
                ranked_nodes = self._rank_nodes_by_distance(x)

                # Step 2 move nodes
                for rank, node in enumerate(ranked_nodes):
                    #TODO: cut off at some rank when using many nodes
                    #TODO: check speedup by vectorizing
                    delta_w = epsilon * numx.exp(old_div(-rank, lmbda)) * \
                                    (x - node.data.pos)
                    node.data.pos += delta_w

                # Step 3 update edge weight
                for e in g.edges:
                    e.data.inc_age()

                # Step 4 set age of edge between first two nodes to zero
                #  or create it if it doesn't exist.
                n0 = ranked_nodes[0]
                n1 = ranked_nodes[1]
                nn = n0.neighbors()
                if n1 in nn:
                    edges = n0.get_edges(neighbor=n1)
                    edges[0].data.age = 0  # should only be one edge
                else:
                    self._add_edge(n0, n1)

                # step 5 delete edges with age > max_age
                self._remove_old_edges(max_age=T)
            remaining_epochs -= 1
        self.epoch = epoch
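The second _train variant is the same algorithm with old_div compatibility wrappers. In both, epsilon, lambda and the maximum edge age are annealed with the same exponential schedule p(t) = p_i * (p_f / p_i)^(t / max_epochs), clamped once t reaches max_epochs. A small sketch of that schedule, where the helper name anneal and the sample values are illustrative only:

import numpy as np

def anneal(p_i, p_f, epoch, max_epochs):
    # exponential interpolation used above for epsilon, lambda and max_age:
    # starts at p_i at epoch 0 and reaches p_f at epoch == max_epochs
    t = min(epoch / float(max_epochs), 1.0)
    return p_i * (p_f / p_i) ** t

# e.g. a learning rate decaying from 0.3 to 0.05 over 100 epochs
print([round(anneal(0.3, 0.05, e, 100), 3) for e in (0, 50, 100)])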