Example No. 1
    def back_propagation(data, labels, thetas, layers):
        num_layers = len(layers)
        (num_examples, num_features) = data.shape
        num_label_types = layers[-1]

        deltas = {}
        # Initialize one gradient accumulator per weight matrix
        for layer_index in range(num_layers - 1):
            in_count = layers[layer_index]
            out_count = layers[layer_index + 1]
            deltas[layer_index] = np.zeros((out_count, in_count + 1))  # 25x785, then 10x26

        for example_index in range(num_examples):
            layers_inputs = {}
            layers_activations = {}
            layers_activation = data[example_index, :].reshape((num_features, 1))  # 785x1
            layers_activations[0] = layers_activation
            # Forward pass, layer by layer
            for layer_index in range(num_layers - 1):
                layer_theta = thetas[layer_index]  # current weight matrix: 25x785, then 10x26
                layer_input = np.dot(layer_theta, layers_activation)  # 25x1 first, then 10x1
                layers_activation = np.vstack((np.array([[1]]), sigmoid(layer_input)))
                layers_inputs[layer_index + 1] = layer_input  # pre-activation of the next layer
                layers_activations[layer_index + 1] = layers_activation  # activation of the next layer
            output_layer_activation = layers_activation[1:, :]

            delta = {}
            # One-hot encode the label
            bitwise_label = np.zeros((num_label_types, 1))
            bitwise_label[labels[example_index][0]] = 1
            # Error between the output layer and the ground truth
            delta[num_layers - 1] = output_layer_activation - bitwise_label

            # Back-propagate through layers L-1, L-2, ..., 2
            for layer_index in range(num_layers - 2, 0, -1):
                layer_theta = thetas[layer_index]
                next_delta = delta[layer_index + 1]
                layer_input = layers_inputs[layer_index]
                layer_input = np.vstack((np.array([[1]]), layer_input))
                # Chain rule: delta_l = theta_l^T . delta_{l+1} * g'(z_l)
                delta[layer_index] = np.dot(layer_theta.T, next_delta) * sigmoid_gradient(layer_input)
                # Drop the bias component
                delta[layer_index] = delta[layer_index][1:, :]
            # Accumulate this example's gradient contribution
            for layer_index in range(num_layers - 1):
                layer_delta = np.dot(delta[layer_index + 1], layers_activations[layer_index].T)
                deltas[layer_index] = deltas[layer_index] + layer_delta  # 25x785, then 10x26

        # Average the accumulated gradients over all examples
        for layer_index in range(num_layers - 1):
            deltas[layer_index] = deltas[layer_index] * (1 / num_examples)

        return deltas
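
The snippets on this page call sigmoid and sigmoid_gradient without defining them, and rely on numpy being imported as np. A minimal sketch of those helpers, assuming the standard logistic activation:

    import numpy as np

    def sigmoid(z):
        # Logistic activation: squashes inputs into (0, 1)
        return 1 / (1 + np.exp(-z))

    def sigmoid_gradient(z):
        # Derivative of the sigmoid: g'(z) = g(z) * (1 - g(z))
        g = sigmoid(z)
        return g * (1 - g)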
Example No. 2
    def hypothesis(data, theta):
        """
        Compute the predicted values.
        :param data: feature matrix, one example per row
        :param theta: model parameters
        :return: predicted probabilities
        """
        predict = np.dot(data, theta)  # linear combination of the features
        # Logistic regression maps the linear output through the sigmoid
        predict = sigmoid(predict)
        return predict
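
A quick usage sketch, assuming the sigmoid helper sketched after Example No. 1; the feature matrix and parameter values below are made up for illustration:

    import numpy as np

    X = np.array([[1.0, 2.0, 0.5],
                  [1.0, -1.5, 3.0]])     # two examples, bias column first
    theta = np.array([0.5, -0.25, 0.1])  # hypothetical parameters
    print(hypothesis(X, theta))          # probabilities in (0, 1)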
Example No. 3
    def feedforward_propagation(data, thetas, layers):
        num_layers = len(layers)
        num_examples = data.shape[0]
        in_layer_activation = data

        # Propagate layer by layer
        for layer_index in range(num_layers - 1):
            theta = thetas[layer_index]
            out_layer_activation = sigmoid(np.dot(in_layer_activation, theta.T))
            # The raw result is num_examples x 25; prepend a bias column to get num_examples x 26
            out_layer_activation = np.hstack((np.ones((num_examples, 1)), out_layer_activation))
            in_layer_activation = out_layer_activation

        # Return the output layer activations, dropping the bias column
        return in_layer_activation[:, 1:]
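
This forward pass pairs with back_propagation from Example No. 1. A hedged end-to-end sketch using the 784-input / 25-hidden / 10-class shapes implied by the comments above; the random data and parameters are placeholders, and the helpers sketched after Example No. 1 are assumed:

    import numpy as np

    layers = [784, 25, 10]                   # input, hidden, and output sizes
    thetas = {
        0: np.random.randn(25, 785) * 0.01,  # extra column for the bias term
        1: np.random.randn(10, 26) * 0.01,
    }
    data = np.hstack((np.ones((5, 1)), np.random.rand(5, 784)))  # 5 examples, bias column prepended
    labels = np.random.randint(0, 10, (5, 1))                    # integer class labels

    probs = feedforward_propagation(data, thetas, layers)   # 5x10 class probabilities
    grads = back_propagation(data, labels, thetas, layers)  # averaged gradient per weight matrix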
Example No. 4
    def hypothesis(data, theta):
        # Map the linear output through the sigmoid to get per-class probability estimates
        prediction = sigmoid(np.dot(data, theta))
        return prediction
Example No. 5
    def hypothesis(data, theta):
        predictions = sigmoid(np.dot(data, theta))
        return predictions

    def calculate_predicitions(self, data, theta):
        # Same sigmoid hypothesis, written as an instance method
        predictions = sigmoid(np.dot(data, theta))
        return predictions
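
In a one-vs-all setup these hypothesis functions are typically evaluated once per class and the highest probability wins. A hedged sketch; the stacked all_thetas matrix and the placeholder data are assumptions, not shown in the snippets above:

    import numpy as np

    def sigmoid(z):
        return 1 / (1 + np.exp(-z))

    data = np.random.rand(5, 3)          # 5 examples, 3 features (bias included); placeholder values
    all_thetas = np.random.randn(3, 10)  # one hypothetical parameter column per class
    probabilities = sigmoid(np.dot(data, all_thetas))    # 5x10 probability matrix
    predicted_labels = np.argmax(probabilities, axis=1)  # most probable class per example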