def mtt(data, user_num, poi_num, zero_adjustment=True):
    print "共有" + str(user_num) + "个用户"
    for key in data.keys():
        print "用户" + str(key) + "序列为" + str(data[key])

    correlation_matrix = get_correlation_matrix(data, user_num)
    nor_cor_matrix = normalize(correlation_matrix)
    print "归一化相关系数矩阵: ", nor_cor_matrix

    transition_tensor = build_six_order_transition_tensor(data, poi_num, nor_cor_matrix, zero_adjustment)
    print "转移张量非零元素占比:", sparsity(transition_tensor)
    print "转移张量是否满足随机性:", check_six_order_transition_tensor(transition_tensor)

    return transition_tensor
def mtt(data, user_num, poi_num, zero_adjustment=True):
    print "共有" + str(user_num) + "个用户"
    for key in data.keys():
        print "用户" + str(key) + "序列为" + str(data[key])

    correlation_matrix = get_correlation_matrix(data, user_num)
    nor_cor_matrix = normalize(correlation_matrix)
    print "归一化相关系数矩阵: ", nor_cor_matrix

    transition_tensor = build_six_order_transition_tensor(
        data, poi_num, nor_cor_matrix, zero_adjustment)
    print "转移张量非零元素占比:", sparsity(transition_tensor)
    print "转移张量是否满足随机性:", check_six_order_transition_tensor(transition_tensor)

    return transition_tensor
def dta(new_tensor, rank, variance_matrix_list=None, alpha=None):
    """Dynamic Tensor Analysis"""
    # number of order of the input tensor.
    order = new_tensor.ndims()

    # If the co-variacne matrices are not given,
    # initialize all of them to be 0
    if variance_matrix_list is None:
        variance_matrix_list = []
        dv = new_tensor.shape
        for i in range(0, order):
            variance_matrix_list.extend(
                [sparse.coo_matrix(([], ([], [])), [dv[i], dv[i]])])

    # If the forgetting factor is not given, it is 1.
    if alpha is None:
        alpha = 1

    u = []
    new_variance_matrix_list = []
    for i in range(0, order):
        if new_tensor.__class__ == tensor.tensor:
            new_tensor_matricize = tenmat.tenmat(new_tensor, [i]).tondarray()
        elif new_tensor.__class__ == sptensor.sptensor:
            new_tensor_matricize = sptenmat.sptenmat(new_tensor,
                                                     [i]).tosparsemat()
        elif new_tensor.__class__ == ttensor.ttensor:
            raise TypeError("It is not supported yet.")
            return
        else:
            raise TypeError(
                "1st argument must be tensor, sptensor, or ttensor")
            return

        new_variance_matrix_list.extend([
            numpy.array(alpha * variance_matrix_list[i] + numpy.dot(
                new_tensor_matricize, new_tensor_matricize.transpose()))
        ])
        # print "new,", new_variance_matrix_list
        (eigenvalue, eigenmatrix) = eigwrapper(new_variance_matrix_list[i],
                                               rank[i])
        u.extend([numpy.array(eigenmatrix)])

    # print new_tensor
    core = new_tensor.ttm(u, None, 't')
    reconstruct_tensor = ttensor.ttensor(core, u)
    print "core:", sparsity(core.tondarray().tolist())
    return reconstruct_tensor, new_variance_matrix_list
def dta(new_tensor, rank, variance_matrix_list=None, alpha=None):
    """Dynamic Tensor Analysis"""
    # number of order of the input tensor.
    order = new_tensor.ndims()

    # If the co-variacne matrices are not given,
    # initialize all of them to be 0
    if variance_matrix_list is None:
        variance_matrix_list = []
        dv = new_tensor.shape
        for i in range(0, order):
            variance_matrix_list.extend([sparse.coo_matrix(([], ([], [])), [dv[i], dv[i]])])

    # If the forgetting factor is not given, it is 1.
    if alpha is None:
        alpha = 1

    u = []
    new_variance_matrix_list = []
    for i in range(0, order):
        if new_tensor.__class__ == tensor.tensor:
            new_tensor_matricize = tenmat.tenmat(new_tensor, [i]).tondarray()
        elif new_tensor.__class__ == sptensor.sptensor:
            new_tensor_matricize = sptenmat.sptenmat(new_tensor, [i]).tosparsemat()
        elif new_tensor.__class__ == ttensor.ttensor:
            raise TypeError("It is not supported yet.")
            return
        else:
            raise TypeError("1st argument must be tensor, sptensor, or ttensor")
            return

        new_variance_matrix_list.extend([numpy.array(alpha*variance_matrix_list[i] + numpy.dot(new_tensor_matricize, new_tensor_matricize.transpose()))])
        # print "new,", new_variance_matrix_list
        (eigenvalue, eigenmatrix) = eigwrapper(new_variance_matrix_list[i], rank[i])
        u.extend([numpy.array(eigenmatrix)])

    # print new_tensor
    core = new_tensor.ttm(u, None, 't')
    reconstruct_tensor = ttensor.ttensor(core, u)
    print "core:", sparsity(core.tondarray().tolist())
    return reconstruct_tensor, new_variance_matrix_list
# Esempio n. 5
# 0
        # NOTE(review): orphaned fragment — the enclosing function/loop
        # header is outside this chunk; names such as tensor_stream_res,
        # check_tensor, residual1, x_values, y_values*, train_percent must
        # come from the surrounding (unseen) scope.
        #     return reconstruct_tensor, new_variance_matrix_list

        # Stream DTA over all tensor chunks except the last two, threading
        # the co-variance matrices from one call into the next.
        reconstruct_tensor = None
        variance_matrix_list = None
        for tensor_data in tensor_stream_res[:-2]:
            # print "data:", sparsity(tensor_data)
            reconstruct_tensor, variance_matrix_list = dta(
                tensor.tensor(numpy.array(tensor_data)), (4, 2, 200),
                variance_matrix_list)
            # print sparsity(reconstruct_tensor.totensor().tondarray().tolist())

        # Densify the final reconstruction into nested Python lists.
        res2 = reconstruct_tensor.totensor().tondarray().tolist()
        print res2

        # Normalize the result and report its sparsity.
        nor_res = dta_normalize_tensor(res2, user_num, time_num, poi_num)
        print "最终张量:", sparsity(res2)
        print nor_res

        # check_tensor = get_check_tensor(check_data, user_num, time_num, poi_num)
        # print check_tensor
        # NOTE(review): check_tensor is used below although its computation
        # above is commented out — presumably defined in the enclosing scope.
        residual2 = delta_tensor_norm(nor_res, check_tensor)

        # Baseline residual: statistics built directly from the raw data.
        statistic_res = get_check_tensor(data, user_num, time_num, poi_num)
        residual3 = delta_tensor_norm(statistic_res, check_tensor)

        # Collect curve points; residual1 is computed earlier (not visible).
        x_values.append(train_percent)
        y_values1.append(residual1)
        y_values2.append(residual2)
        y_values3.append(residual3)
        train_percent += 0.2
# Esempio n. 6
# 0
        # NOTE(review): orphaned fragment (duplicate of an earlier snippet);
        # the enclosing scope defining tensor_stream_res, check_tensor,
        # residual1, x_values, y_values* and train_percent is not visible.
        #     return reconstruct_tensor, new_variance_matrix_list

        # Apply DTA to each streamed chunk except the last two, carrying the
        # co-variance matrices forward between calls.
        reconstruct_tensor = None
        variance_matrix_list = None
        for tensor_data in tensor_stream_res[:-2]:
            # print "data:", sparsity(tensor_data)
            reconstruct_tensor, variance_matrix_list = dta(
                tensor.tensor(numpy.array(tensor_data)), (4, 2, 200), variance_matrix_list
            )
            # print sparsity(reconstruct_tensor.totensor().tondarray().tolist())

        # Convert the final reconstruction to nested lists for inspection.
        res2 = reconstruct_tensor.totensor().tondarray().tolist()
        print res2

        nor_res = dta_normalize_tensor(res2, user_num, time_num, poi_num)
        print "最终张量:", sparsity(res2)
        print nor_res

        # check_tensor = get_check_tensor(check_data, user_num, time_num, poi_num)
        # print check_tensor
        # NOTE(review): check_tensor used below though its local computation
        # is commented out — verify it exists in the enclosing scope.
        residual2 = delta_tensor_norm(nor_res, check_tensor)

        # Residual of the plain statistical baseline against the check tensor.
        statistic_res = get_check_tensor(data, user_num, time_num, poi_num)
        residual3 = delta_tensor_norm(statistic_res, check_tensor)

        x_values.append(train_percent)
        y_values1.append(residual1)
        y_values2.append(residual2)
        y_values3.append(residual3)
        train_percent += 0.2
    # NOTE(review): orphaned fragment — enclosing def not visible; axis_pois,
    # time_num, data, user_num must be defined by the surrounding scope.
    poi_num = len(axis_pois)
    transition_tensor = mtt(data, user_num, poi_num)

    # equal_all_sum_one: equal
    # NOTE(review): under Python 2, 1 / (poi_num * time_num * user_num) is
    # integer division and yields 0 for any product > 1 — confirm whether
    # `from __future__ import division` is in effect at file top.
    init_tensor1 = [
        [[1 / (poi_num * time_num * user_num) for i in range(poi_num)] for j in range(time_num)]
        for k in range(user_num)
    ]

    # random_all_sum_one
    temp_tensor = [[[random.random() for i in range(poi_num)] for j in range(time_num)] for k in range(user_num)]
    init_tensor2 = three_tensor_hadarmard(1 / three_order_tensor_first_norm(temp_tensor), temp_tensor)

    # user_slice_sum_one
    # NOTE(review): same integer-division concern as init_tensor1.
    init_tensor3 = [
        [[1 / (poi_num * time_num * user_num) for i in range(poi_num)] for j in range(time_num)]
        for k in range(user_num)
    ]

    # Iterate the transition tensor from the uniform initial distribution.
    res1, iterator__values1 = tensor_three_mode_product(transition_tensor, init_tensor1)
    print res1
    print sparsity(res1)

    # Coordinate grids sized to the result tensor, for 3-D plotting.
    x, y, z = numpy.mgrid[0:user_num, 0:time_num, 0:poi_num]
    # val = numpy.random.random(z.shape)

    print x.shape, y.shape, z.shape
    # Plot and show in mayavi2
    # pts = mlab.points3d(x, y, z, res1, scale_factor=0.4, transparent=False)
    # mlab.show()
    # NOTE(review): orphaned fragment — enclosing def not visible; axis_pois,
    # data, user_num, time_num, check_data come from the surrounding scope.
    poi_num = len(axis_pois)

    # print data

    # Split the data into a stream of tensors (window/step argument 2 —
    # presumably the chunking granularity; confirm against tensor_stream).
    tensor_stream_res = tensor_stream(data, user_num, poi_num, 2)
    print len(tensor_stream_res)

    # def dta(new_tensor, rank, variance_matrix_list=None, alpha=None):
    #     return reconstruct_tensor, new_variance_matrix_list

    # Run DTA over the whole stream, threading co-variance matrices through.
    reconstruct_tensor = None
    variance_matrix_list = None
    for tensor_data in tensor_stream_res:
        # print "data:", sparsity(tensor_data)
        reconstruct_tensor, variance_matrix_list = dta(tensor.tensor(numpy.array(tensor_data)), (4, 2, 10), variance_matrix_list)
        # print sparsity(reconstruct_tensor.totensor().tondarray().tolist())

    # Densify the final reconstruction and report its sparsity.
    res = reconstruct_tensor.totensor().tondarray().tolist()
    # print res
    print "最终张量:", sparsity(res)

    # Compare the normalized reconstruction against the check tensor.
    nor_res = dta_normalize_tensor(res, user_num, time_num, poi_num)
    check_tensor = get_check_tensor(check_data, user_num, time_num, poi_num)
    # print check_tensor
    residual = delta_tensor_norm(nor_res, check_tensor)

    print residual
    print nor_res
    print check_data
    print sparsity(check_tensor)
    print validate_eigen_tensor(nor_res), validate_eigen_tensor(check_tensor)
    # NOTE(review): orphaned fragment (duplicate of an earlier snippet);
    # data, user_num, poi_num, time_num, check_data come from unseen scope.
    # print data

    # Chunk the data into a stream of tensors and report the chunk count.
    tensor_stream_res = tensor_stream(data, user_num, poi_num, 2)
    print len(tensor_stream_res)

    # def dta(new_tensor, rank, variance_matrix_list=None, alpha=None):
    #     return reconstruct_tensor, new_variance_matrix_list

    # DTA over every streamed chunk, carrying the co-variance state forward.
    reconstruct_tensor = None
    variance_matrix_list = None
    for tensor_data in tensor_stream_res:
        # print "data:", sparsity(tensor_data)
        reconstruct_tensor, variance_matrix_list = dta(
            tensor.tensor(numpy.array(tensor_data)), (4, 2, 10),
            variance_matrix_list)
        # print sparsity(reconstruct_tensor.totensor().tondarray().tolist())

    # Final reconstruction as nested lists.
    res = reconstruct_tensor.totensor().tondarray().tolist()
    # print res
    print "最终张量:", sparsity(res)

    # Residual between the normalized result and the check tensor.
    nor_res = dta_normalize_tensor(res, user_num, time_num, poi_num)
    check_tensor = get_check_tensor(check_data, user_num, time_num, poi_num)
    # print check_tensor
    residual = delta_tensor_norm(nor_res, check_tensor)

    print residual
    print nor_res
    print check_data
    print sparsity(check_tensor)
    print validate_eigen_tensor(nor_res), validate_eigen_tensor(check_tensor)
    # NOTE(review): orphaned fragment (duplicate of an earlier snippet with
    # different wrapping); axis_pois, time_num, data, user_num come from the
    # enclosing (unseen) scope.
    poi_num = len(axis_pois)
    transition_tensor = mtt(data, user_num, poi_num)

    # equal_all_sum_one: equal
    # NOTE(review): in Python 2 this 1 / (...) is integer division and
    # evaluates to 0 — check for `from __future__ import division`.
    init_tensor1 = [[[
        1 / (poi_num * time_num * user_num) for i in range(poi_num)
    ] for j in range(time_num)] for k in range(user_num)]

    # random_all_sum_one
    temp_tensor = [[[random.random() for i in range(poi_num)]
                    for j in range(time_num)] for k in range(user_num)]
    init_tensor2 = three_tensor_hadarmard(
        1 / three_order_tensor_first_norm(temp_tensor), temp_tensor)

    # user_slice_sum_one
    # NOTE(review): same integer-division concern as init_tensor1.
    init_tensor3 = [[[
        1 / (poi_num * time_num * user_num) for i in range(poi_num)
    ] for j in range(time_num)] for k in range(user_num)]

    # Iterate the transition tensor from the uniform initial tensor.
    res1, iterator__values1 = tensor_three_mode_product(
        transition_tensor, init_tensor1)
    print res1
    print sparsity(res1)

    # Coordinate grids matching the result tensor, for 3-D visualization.
    x, y, z = numpy.mgrid[0:user_num, 0:time_num, 0:poi_num]
    # val = numpy.random.random(z.shape)

    print x.shape, y.shape, z.shape
    # Plot and show in mayavi2
    # pts = mlab.points3d(x, y, z, res1, scale_factor=0.4, transparent=False)
    # mlab.show()