Example 1
def predict_out(predict_in: np.mat):
    """
    Output of the output layer.
    :param predict_in: input to the output layer
    :return: result (mat): output of the output layer
    """
    result = sig(predict_in)
    return result
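Every example on this page calls a sig helper that is not shown; a minimal sketch, assuming it is the standard sigmoid implemented with NumPy:

import numpy as np

def sig(x):
    """Sigmoid activation 1 / (1 + e^(-x)); works on scalars and NumPy arrays/matrices."""
    return 1.0 / (1 + np.exp(-x))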
Example 2
def hidden_out(hidden_in: np.mat):
    """
    Output of the hidden layer.
    :param hidden_in: input to the hidden layer
    :return: hidden_output (mat): output of the hidden layer
    """
    hidden_output = sig(hidden_in)  # apply the activation function
    return hidden_output
Example 3
def get_cost(predict: list, classLabels: list):
    """
    Compute the log-loss of the predictions.
    :param predict: predicted values
    :param classLabels: labels
    :return: (float) value of the loss function
    """
    m = len(predict)
    error = 0.0
    for i in range(m):
        error -= np.log(sig(predict[i] * classLabels[i]))
    return error
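For labels in {-1, +1} this is the negative log-likelihood -log sig(y * p) summed over the samples: confident correct predictions contribute almost nothing, confident wrong ones dominate. A quick illustrative check (values made up), assuming the sig sketch above:

print(get_cost([2.0, -3.0], [1, -1]))   # both correct: total cost ~0.18
print(get_cost([2.0, -3.0], [-1, 1]))   # both wrong: total cost ~5.2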
Example 4
def stocGradAscent(dataMatrix: np.mat, classLabels: np.mat, k: int,
                   max_iter: int, alpha: float):
    """
    Train the FM model with stochastic gradient descent.
    :param dataMatrix:  features
    :param classLabels: labels
    :param k:   dimension of v
    :param max_iter:    maximum number of iterations
    :param alpha:   learning rate
    :return:                w0(float)  bias term
                             w(mat)     first-order weights
                             v(mat)     interaction (cross-term) weights
    """
    m, n = np.shape(dataMatrix)
    # 1. Initialize the parameters
    w = np.zeros((n, 1))  # n is the number of features
    w0 = 0  # bias term
    v = initialize_v(n, k)  # initialize v

    # 2. Training
    for it in range(max_iter):
        for x in range(m):  # stochastic updates, one sample at a time
            inter_1 = dataMatrix[x] * v
            inter_2 = np.multiply(dataMatrix[x], dataMatrix[x]) * np.multiply(
                v, v)  # multiply is element-wise
            # the cross (interaction) term
            interaction = np.sum(np.multiply(inter_1, inter_1) -
                                 inter_2) / 2  # matches the FM model formula

            p = w0 + dataMatrix[x] * w + interaction  # compute the predicted output
            loss = sig(classLabels[x] * p[0, 0]) - 1

            w0 -= alpha * loss * classLabels[x]
            for i in range(n):
                if dataMatrix[x, i] != 0:
                    w[i, 0] -= alpha * loss * classLabels[x] * dataMatrix[x, i]

                    for j in range(k):
                        v[i, j] -= alpha * loss * classLabels[x] * (
                            dataMatrix[x, i] * inter_1[0, j] -
                            v[i, j] * dataMatrix[x, i] * dataMatrix[x, i])
        # compute the value of the loss function
        if it % 1000 == 0:
            print(
                "\t---------------- iter: ", it, " , cost: ",
                get_cost(getPrediction(np.mat(dataMatrix), w0, w, v),
                         classLabels))

    # 3. Return the final FM model parameters
    return w0, w, v
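stocGradAscent relies on an initialize_v helper that is not defined anywhere in these examples; a minimal sketch, assuming the usual FM choice of small random normal values (the 0.2 standard deviation is an assumption, not taken from the examples):

import numpy as np

def initialize_v(n: int, k: int):
    """Initialize the n x k interaction weight matrix v.

    Assumed implementation: small zero-mean normal values; std 0.2 is a guess.
    """
    return np.mat(np.random.normal(0.0, 0.2, (n, k)))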
Example 5
def predict(data: np.mat, w: np.mat):
    """
    Predict on the test data.
    :param data:    features of the test data
    :param w:   model parameters
    :return:    (mat)   final predictions
    """
    h = FTool.sig(data * w.T)  # predicted probabilities
    m = np.shape(h)[0]
    # threshold at 0.5 to get 0/1 class labels
    for i in range(m):
        if h[i, 0] < 0.5:
            h[i, 0] = 0.0
        else:
            h[i, 0] = 1.0
    return h
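A usage sketch for predict; unlike the other examples it expects w as a row vector (hence w.T) and reaches sig through an FTool module, presumably a utilities module from the same project. The stand-in namespace below is only an assumption to make the sketch self-contained:

import types
import numpy as np

# stand-in for the project's FTool module (assumption: it exposes the sigmoid as sig)
FTool = types.SimpleNamespace(sig=lambda x: 1.0 / (1 + np.exp(-x)))

data = np.mat([[0.2, 1.0], [-0.5, 0.3]])  # illustrative test features
w = np.mat([[1.0, -2.0]])                 # row vector of weights, so w.T has shape (2, 1)
print(predict(data, w))                   # thresholded 0/1 predictions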
Example 6
def lr_train_bgd(feature: np.mat, label: np.mat, maxCycle: int, alpha: float):
    """
    Train an LR model with batch gradient descent.
    :param feature: features
    :param label:   labels
    :param maxCycle:    maximum number of iterations
    :param alpha:   learning rate
    :return:            w(mat)  weights
    """
    n = np.shape(feature)[1]  # number of features
    w = np.mat(np.ones((n, 1)))  # initialize the weights
    i = 0
    while i <= maxCycle:
        i += 1
        h = sig(feature * w)  # compute the sigmoid values
        err = label - h
        if i % 100 == 0:
            print("\t--------------------------iter=" + str(i) +
                  ", train error rate= " + str(error_rate(h, label)))
        w += alpha * feature.T * err  # update the weights
    return w
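A minimal usage sketch for lr_train_bgd, with the sig sketch from Example 1 in scope; the toy feature matrix, labels, and hyperparameters are invented for illustration, and maxCycle is kept under 100 so the error_rate logging helper (not shown in these examples) is never reached:

import numpy as np

# toy data: a bias column plus two features, labels in {0, 1} (illustrative values)
feature = np.mat([[1.0, 0.5, 1.2],
                  [1.0, 1.5, 0.3],
                  [1.0, -0.7, -1.1],
                  [1.0, -1.2, -0.4]])
label = np.mat([[1.0], [1.0], [0.0], [0.0]])

# maxCycle < 100 so the error_rate branch is never triggered in this sketch
w = lr_train_bgd(feature, label, maxCycle=50, alpha=0.01)
print(w)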
Example 7
def getPrediction(dataMatrix: np.mat, w0: float, w: np.mat, v: np.mat):
    """
    Get the predicted values.
    :param dataMatrix:  features
    :param w0:  bias term
    :param w:   first-order weights
    :param v:   interaction (cross-term) weights
    :return:    (list) predicted values
    """
    m = np.shape(dataMatrix)[0]
    result = []
    for x in range(m):
        inter_1 = dataMatrix[x] * v
        inter_2 = np.multiply(dataMatrix[x], dataMatrix[x]) * \
            np.multiply(v, v)  # multiply is element-wise
        # the cross (interaction) term
        interaction = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
        p = w0 + dataMatrix[x] * w + interaction  # compute the predicted output
        pre = sig(p[0, 0])
        result.append(pre)
    return result
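To tie the FM examples together, a hedged end-to-end sketch using stocGradAscent, getPrediction, and get_cost, with the sig and initialize_v sketches above in scope. The data is made up, and the labels are passed as a plain list in {-1, +1}, since both the per-sample update and get_cost index them as scalars:

import numpy as np

# toy features (dense for simplicity) and labels in {-1, +1}; values are illustrative
dataMatrix = np.mat([[1.0, 0.0, 2.0],
                     [0.0, 1.5, 0.5],
                     [2.0, 0.3, 0.0],
                     [0.5, 2.0, 1.0]])
classLabels = [1, -1, 1, -1]

w0, w, v = stocGradAscent(dataMatrix, classLabels, k=4, max_iter=2000, alpha=0.01)
predictions = getPrediction(dataMatrix, w0, w, v)
print("cost:", get_cost(predictions, classLabels))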