Example #1
File: fm.py Project: zumouse/Jtyoui
import numpy as np


def stop_grad_ascent(data: np.mat, label: np.mat, k: int, max_iter: int,
                     alpha: float) -> (float, np.mat, np.mat):
    """Train an FM model with stochastic gradient descent.

    :param data: feature matrix
    :param label: labels (in {-1, +1})
    :param k: dimensionality of v
    :param max_iter: maximum number of iterations
    :param alpha: learning rate
    :return: the weights w0, w, v
    """
    m, n = np.shape(data)
    w = np.random.randn(n).reshape((n, 1))  # first-order weights
    w0 = 0  # bias term
    v = initialize_v(n, k)  # interaction (cross-term) weights
    for it in range(max_iter):
        for x in range(m):
            # Second-order interaction term:
            # 0.5 * sum_f ((x·v)_f^2 - (x∘x)·(v∘v)_f)
            inter_1 = data[x] * v
            inter_2 = np.multiply(data[x], data[x]) * np.multiply(v, v)
            inter = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
            p = w0 + data[x] * w + inter  # model output
            # Gradient factor of the logistic loss; label[x, 0] keeps it a
            # scalar, so w0 stays a float as the return annotation promises
            loss = sigmoid(label[x, 0] * p[0, 0]) - 1
            w0 -= alpha * loss * label[x, 0]
            for i in range(n):
                if data[x, i] != 0:  # zero features contribute no gradient
                    w[i, 0] -= alpha * loss * label[x, 0] * data[x, i]
                    for j in range(k):
                        v[i, j] -= alpha * loss * label[x, 0] * (
                            data[x, i] * inter_1[0, j] -
                            v[i, j] * data[x, i] * data[x, i])
        if it % 100 == 0:
            pre = get_prediction(np.mat(data), w0, w, v)
            print(get_cost(np.mat(pre), label))
    return w0, w, v
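
The listing calls sigmoid, initialize_v, and get_cost without showing them (get_prediction appears in Example #2). A minimal sketch of plausible definitions, inferred from how they are used above; the actual implementations in fm.py may differ:

import numpy as np


def sigmoid(x):
    """Logistic function; maps the model output to a (0, 1) score."""
    return 1.0 / (1 + np.exp(-x))


def initialize_v(n, k):
    """Initialize the n x k interaction-weight matrix with small noise."""
    return np.mat(np.random.normal(0, 0.2, (n, k)))


def get_cost(predict, label):
    """Negative log-likelihood over all samples, for labels in {-1, +1}.

    predict arrives as a 1 x m matrix (the training loop wraps the
    prediction list in np.mat), label as an m x 1 matrix.
    """
    m = np.shape(predict)[1]
    error = 0.0
    for i in range(m):
        error -= np.log(sigmoid(predict[0, i] * label[i, 0]))
    return error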
Example #2
import numpy as np


def get_prediction(data, w0, w, v):
    """Compute FM predictions.

    :param data: feature matrix
    :param w0: bias (constant-term) weight
    :param w: first-order weights
    :param v: interaction (cross-term) weights
    :return: list of predicted probabilities
    """
    m = np.shape(data)[0]
    result = []
    for x in range(m):
        # Same O(nk) interaction reduction as in training
        inter_1 = data[x] * v
        inter_2 = np.multiply(data[x], data[x]) * np.multiply(v, v)
        inter = np.sum(np.multiply(inter_1, inter_1) - inter_2) / 2.
        p = w0 + data[x] * w + inter
        pre = sigmoid(p[0, 0])
        result.append(pre)
    return result
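
Both FM examples rely on the standard identity that reduces the pairwise interaction sum to O(nk): the sum over pairs i < j of <v_i, v_j> * x_i * x_j equals 0.5 * sum over factors f of ((sum_i v_{i,f} x_i)^2 - sum_i v_{i,f}^2 x_i^2). A quick numerical sanity check of that identity on random data (hypothetical, not from the project):

import numpy as np

rng = np.random.default_rng(0)
n, k = 6, 4
x = rng.normal(size=n)
v = rng.normal(size=(n, k))

# Naive double loop over feature pairs i < j
naive = sum(np.dot(v[i], v[j]) * x[i] * x[j]
            for i in range(n) for j in range(i + 1, n))

# Vectorized form used in stop_grad_ascent / get_prediction
fast = 0.5 * np.sum((x @ v) ** 2 - (x ** 2) @ (v ** 2))

assert np.isclose(naive, fast)  # the two agree up to float error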
Example #3
import numpy as np


def lr_train_bgd(feature: np.ndarray, label: np.ndarray, max_cycle: int,
                 alpha: float) -> np.ndarray:
    """Train a logistic regression (LR) model with batch gradient descent.

    :param feature: feature matrix
    :param label: labels
    :param max_cycle: maximum number of iterations
    :param alpha: learning rate
    :return: the trained weights w
    """
    n = np.shape(feature)[1]  # number of features
    w = np.random.rand(n).reshape((n, 1))  # random weight initialization
    i = 0
    while i <= max_cycle:
        i += 1
        h = sigmoid(np.dot(feature, w))  # predicted probabilities
        err = label - h  # prediction error
        if i % 100 == 0:
            print(f'error rate: {error_rate(h, label)}')
        w += alpha * np.dot(feature.T, err)  # update step w_{i+1} = w_i + α·d

    return w
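
error_rate is not shown in the listing either. A plausible definition consistent with how it is called above, assuming 0/1 labels and an average cross-entropy (an assumption, not the project's actual code):

import numpy as np


def error_rate(h, label):
    """Average cross-entropy between predictions h and 0/1 labels."""
    m = np.shape(h)[0]
    total = 0.0
    for i in range(m):
        p = max(min(h[i, 0], 1 - 1e-12), 1e-12)  # clip away from 0 and 1
        total -= label[i, 0] * np.log(p) + (1 - label[i, 0]) * np.log(1 - p)
    return total / m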


if __name__ == '__main__':
    weight = lr_train_bgd(TRAIN_DATA, TEST_LABEL, 2000, 0.01)
    # np.float was an alias for the builtin float and has been removed
    # from NumPy; use float directly
    print(
        sigmoid(
            np.dot(np.array([[1, 1 / 24, 10 / 60, 32 / 60]], dtype=float),
                   weight)))
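
TRAIN_DATA and TEST_LABEL are loaded elsewhere in the project and are not part of this listing. To run the driver stand-alone, hypothetical stand-ins of the right shape (four feature columns including a leading bias column, to match the query row above, and (m, 1) labels to match label - h) could look like:

import numpy as np

rng = np.random.default_rng(0)
m = 200
X = rng.random((m, 3))  # three raw features in [0, 1)
TRAIN_DATA = np.hstack([np.ones((m, 1)), X])  # leading bias column
# Hypothetical linearly separable 0/1 labels, shaped (m, 1)
TEST_LABEL = (X.sum(axis=1, keepdims=True) > 1.5).astype(float)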