Exemplo n.º 1
0
    def gradient(self, x, i):
        """Negative CD-1 gradient of the RBM for minibatch i.

        `x` packs (weights, hidden bias, visual bias) into one flat
        vector; it is unpacked back into the model fields before the
        contrastive-divergence statistics are computed. Returns -g so a
        minimizer can be used directly.
        """
        # Unpack the flat parameter vector into the model's fields.
        num_v = self.__num_visual
        num_h = self.__num_hidden
        n_w = num_h * num_v
        self.__weight_v2h = x[0:n_w].reshape((num_h, num_v))
        self.__hidden_bias = x[n_w:(n_w + num_h)]
        self.__visual_bias = x[(n_w + num_h):]

        _, batch_size, num_batches = self.points.shape
        idx = i % num_batches
        batch = self.points[:, :, idx]  # pick one minibatch from the data set
        # Broadcast the (column) bias vectors across the minibatch.
        hb = np.tile(self.__hidden_bias, (1, batch_size))
        vb = np.tile(self.__visual_bias, (1, batch_size))

        # One Gibbs step (CD-1): up, sample, down, up.
        h_prob0 = sigmoid(self.__weight_v2h.dot(batch) + hb)
        h_samp0 = sample(h_prob0)
        v_prob1 = sigmoid(self.__weight_v2h.T.dot(h_samp0) + vb)
        v_recon = v_prob1  # mean-field reconstruction: no sampling on the way down
        h_prob1 = sigmoid(self.__weight_v2h.dot(v_recon) + hb)

        # Positive minus negative statistics, averaged over the minibatch.
        grad_w = (h_prob0.dot(batch.T) - h_prob1.dot(v_recon.T)) / batch_size
        grad_h = (h_prob0 - h_prob1).dot(np.ones((batch_size, 1))) / batch_size
        grad_v = (batch - v_recon).dot(np.ones((batch_size, 1))) / batch_size

        # L2 weight decay on the weight matrix only (not on the biases).
        penalty = 1e-4 * self.__weight_v2h
        flat = np.vstack(((grad_w - penalty).reshape((-1, 1)), grad_h, grad_v))
        return -flat
Exemplo n.º 2
0
 def compute(self, points, L=None):
     """Forward pass of the multilayer perceptron.

     Feeds `points` (one column per sample) through the first L layers
     and returns the list of per-layer sigmoid activations.
     """
     if L is None:
         L = len(self.b)  # default: run through every layer
     _, num_samples = points.shape
     activations = []
     current = points
     for layer in range(L):
         pre = self.w[layer].dot(current) + np.tile(self.b[layer], (1, num_samples))
         current = sigmoid(pre)  # output of this layer feeds the next
         activations.append(current)
     return activations
Exemplo n.º 3
0
    def ffobject(self, x, i):
        """Reconstruction-error objective of the RBM for minibatch i.

        Unpacks the flat parameter vector `x` into the model fields,
        runs one up-down pass, and returns the average squared
        reconstruction error over the minibatch.
        """
        # Unpack the flat parameter vector into the model's fields.
        num_v = self.__num_visual
        num_h = self.__num_hidden
        n_w = num_h * num_v
        self.__weight_v2h = x[0:n_w].reshape((num_h, num_v))
        self.__hidden_bias = x[n_w:(n_w + num_h)]
        self.__visual_bias = x[(n_w + num_h):]

        _, batch_size, num_batches = self.points.shape
        batch = self.points[:, :, i % num_batches]
        # Broadcast the (column) bias vectors across the minibatch.
        hb = self.__hidden_bias.dot(np.ones((1, batch_size)))
        vb = self.__visual_bias.dot(np.ones((1, batch_size)))

        # Up-down pass: visible -> hidden (sampled) -> visible (mean field).
        h_prob = sigmoid(self.__weight_v2h.dot(batch) + hb)
        h_samp = sample(h_prob)
        v_prob = sigmoid(self.__weight_v2h.T.dot(h_samp) + vb)
        # Average squared reconstruction error over the minibatch.
        return ((v_prob - batch) ** 2).sum() / batch_size
Exemplo n.º 4
0
 def posterior(self, v_state):
     """Posterior probability: activation probability of the hidden
     units given the visible-unit values."""
     return sigmoid(self.foreward(v_state))
Exemplo n.º 5
0
 def likelihood(self, h_state):
     """Likelihood probability: activation probability of the visible
     units given the hidden-unit values."""
     return sigmoid(self.backward(h_state))