Example #1
def sample_repos(n):
    log.init_log()
    db.connect_db()

    db.sample_projects(n)

    log.close_log()
Example #2
def main():
    log.init_log()
    db.connect_db()

    # Code here
    plots.create_all_histograms()

    log.close_log()
Example #3
def start_sample_matrix(ratio, lambdas, lr, table_name, l):
    MAE = 0
    RMSE = 0
    start_time = time.strftime('%Y-%m-%d-%H-%M-%S',
                               time.localtime(time.time()))
    log.start_log(
        start_time, "../matrix/" + table_name.decode('utf-8') + "/" +
        "MF矩阵分解结果.txt".decode('utf-8'))
    f = log.write_log()

    lc_table_name = 'lc_' + table_name
    tp_table_name = 'tp_' + table_name
    # start: prediction section
    # # the returned matrix has had values masked out
    # C, original_matrix, changed_zero = dm.getMatrix(table_name, ratio)
    C, original_matrix, changed_zero = dm.get_Matrix_from_lc_tp(
        lc_table_name, tp_table_name, ratio, 1)
    # C = np.array(C)
    d = C.shape
    U = np.random.rand(d[0], l)
    V = np.random.rand(d[1], l)
    print "Starting matrix factorization"
    matrix, X, Y = simple_mat.matrix_factorization(C, U, V, lambdas, step, lr)
    # start validation
    print "Starting validation"
    matrix0, pre_or_mat, num = de.norma_matrix(matrix, original_matrix)
    MAE, RMSE = vali.validate(matrix, original_matrix, changed_zero)
    # # end
    # end: prediction section
    file_path = "../matrix/" + table_name.decode('utf-8')
    t = str(ratio) + "_" + str(num) + "_" + start_time + ".txt"
    # start: save the factorized matrix
    np.savetxt(file_path + "/matrix_factorization/MF_matrix_factorization_" +
               str(ratio) + ".txt",
               matrix,
               fmt='%.8f')
    # end: save the factorized matrix
    # start: save the original matrix filled in with predicted values
    np.savetxt(file_path + "/result/MF_pre_" + str(ratio) + ".txt",
               pre_or_mat,
               fmt='%.8f')
    # end: save the original matrix filled in with predicted values
    # start: save the post-processed factorized matrix (negatives set to 0)
    np.savetxt(file_path + "/out/MF_" + t.decode('utf-8'), matrix0, fmt='%.8f')
    # end: save the post-processed factorized matrix (negatives set to 0)
    # end
    end_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    print >> f, "Start time:", start_time
    print >> f, "End time:", end_time
    # plot the gradient-descent progress
    title = 'lr:{0} alpha:{1} beta:{2} step:{3} lambdas:{4} sim:{5}'
    title = title.format(lr, ab[0], ab[1], step, lambdas, simlambda)
    print >> f, "Parameters:", title
    figure_path = "../matrix/" + table_name.decode(
        'utf-8') + "/figure/MF_" + str(ratio) + "_" + start_time + ".jpg"
    figure.paint1(X, Y, title, figure_path)
    log.close_log()
    return MAE, RMSE
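
For context, simple_mat.matrix_factorization is not shown in these examples. A minimal sketch of plain gradient-descent matrix factorization with the same call shape (matrix C, factor matrices U and V, regularization weight, step count, learning rate), assuming zero entries of C mark missing values, might look like the following; it is illustrative only, not the implementation used above.

import numpy as np

def matrix_factorization_sketch(C, U, V, lambdas, steps, lr):
    """Factor C (m x n) into U (m x l) and V (n x l) so that C ~= U.dot(V.T).

    Zero entries of C are treated as missing and skipped during training.
    Returns the reconstruction plus the step/loss history, mirroring the
    (matrix, X, Y) return shape used in the example above.
    """
    mask = C > 0                  # train only on observed entries
    X, Y = [], []                 # step index / loss history for plotting
    for step in range(steps):
        E = mask * (C - U.dot(V.T))                     # error on observed entries
        loss = np.sum(E ** 2) + lambdas * (np.sum(U ** 2) + np.sum(V ** 2))
        X.append(step)
        Y.append(loss)
        U += lr * (E.dot(V) - lambdas * U)              # gradient step, L2 regularized
        V += lr * (E.T.dot(U) - lambdas * V)
    return U.dot(V.T), X, Y

vali.validate then presumably compares this reconstruction against the held-out entries (changed_zero) to produce the MAE and RMSE returned by the example.
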
Example #4
def download_repos(min_stars, max_stars):
    log.init_log()
    db.connect_db()

    log.log("Getting repos from github api")
    repos = github.get_repos(min_stars, max_stars)

    log.log("Saving repos in database")
    # Save repos in db
    for repo in repos:
        db.insert_project(repo)

    log.log("Repos saved in database")

    log.close_log()
Example #5
def main():
    '''
    Main entry point, start the server.
    '''
    # Tell Tornado to parse the command line for us.
    tornado.options.parse_command_line()

    # Init logging to file
    init_file_log(logging.DEBUG)
    # Set the console logging level
    if options.debug:
        logger.setLevel(logging.DEBUG)
        options.logging = "debug"
        tornado.log.enable_pretty_logging(options,
                                          logging.getLogger("tornado.access"))
        tornado.log.enable_pretty_logging(
            options, logging.getLogger("tornado.application"))
        tornado.log.enable_pretty_logging(options,
                                          logging.getLogger("tornado.general"))
    else:
        logger.setLevel(logging.INFO)

    logger.info("Project intro WebSocket server.")

    # Initial setup of the Raspberry Pi.
    # GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)

    # Create a Tornado HTTP and WebSocket server.
    http_server = tornado.httpserver.HTTPServer(APP)
    http_server.listen(options.port)
    logger.info("Listening on port: " + str(options.port))

    # Start the Tornado event loop.
    tornado.ioloop.IOLoop.instance().start()

    # Close the log when we're done.
    close_log()
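
init_file_log is not shown in this example. A minimal sketch using the standard logging module, assuming the module-level logger seen above and a hypothetical log-file path, could be:

import logging

def init_file_log(level, path="server.log"):
    """Attach a file handler to the module logger (sketch only; the real
    init_file_log and its arguments are not shown in the example)."""
    handler = logging.FileHandler(path)
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s %(levelname)s %(name)s: %(message)s"))
    logger.addHandler(handler)
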
Example #6
def caculate_for_diversity(table_name, ratio, top_k=20, greedy_Lambda=0.6, r=0.001):
    """
    Diversity experiment.
    :param table_name:
    :param ratio:
    :param r:
    :param top_k:
    :param greedy_Lambda:
    :return:
    """
    # Shanghai Jiao Tong University, Hefei University of Technology (old campus), ...
    # people = ['31.031583,121.442614','31.849273,117.302611','40.011006,116.338897']
    # diseases = ['高血压','','']
    # load the predicted (filled-in) matrix for the given ratio
    matrix = dealtxt.load_file_to_array(
        "../matrix/" + table_name[3:].decode('utf-8') + "/result/pre_" + str(ratio) + ".txt")
    recommend_server = re_div.Recommend_Server()
    final_hos_rank = recommend_server.filter_hos_by_sorted(table_name)
    # doctor info with the final hospital ranking attached, but without predicted scores; doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat]]
    doc_info_un = recommend_server.get_doc_info(table_name, final_hos_rank)

    # start: main loop
    diseases = recommend_server.get_disease(table_name)
    peoples = recommend_server.get_university(table_name)  # peoples: [[name, latlng] ...]
    random_disease = mathn.get_random_sort(diseases)
    mmr_list = []
    mmr_list_matrix_index = []
    two_list = []
    two_list_matrix_index = []

    for chosen_disease in diseases[:5]:
        un_peoples = mathn.get_random_sort(peoples)
        for chosen_people in un_peoples[:5]:
            # add driving-time info and drop doctors whose hospitals are outside the allowed range;
            # doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration]]
            # people_hospital_duration: {"hos_name": duration}
            doc_info_un2, people_hospital_duration = recommend_server.filter_by_sql_duration(chosen_people, doc_info_un)
            # doc_info_un2, people_hospital_duration = recommend_server.filter_by_duration(chosen_people, doc_info_un)
            print "Number of doctors:", len(doc_info_un2)
            # predicted (filled-in) scores, {"hos_id": point}
            forecast_socre = recommend_server.matrix_score_by_disease(chosen_disease, table_name, matrix)
            print "Filled-matrix scores:", forecast_socre
            # attach scores to get the final doctor info for this disease;
            # doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration,socre]]
            doc_info = recommend_server.add_score(doc_info_un2, forecast_socre)
            print "Number of doctors:", len(doc_info)
            # ls_sort: [index], sorted by weight in descending order
            ls_sort = recommend_server.weighth_with_duration(doc_info)
            print "Number of doctors sorted by weight (descending):", len(ls_sort)
            # doc_id_dict:{"doc_id":index}
            doc_id_dict = greedy.get_doc_id_dict(table_name)
            sim_matrix = get_sim_matrix(matrix)
            print "ls_sort数量:", len(ls_sort)
            print "MMR====================================MMR"
            recommend_list_mmr, recommend_list_mmr_index = mmr.MMR(doc_info,doc_id_dict, sim_matrix,greedy_Lambda,top_k)
            print "recommend_list_mmr",recommend_list_mmr
            print "recommend_list_mmr_index",recommend_list_mmr_index
            mmr_list.append(recommend_list_mmr)
            mmr_list_matrix_index.append(recommend_list_mmr_index)
            print "two====================================two"
            recommend_list_two, recommend_list_two_index = greedy.greedy_two(ls_sort, doc_info, doc_id_dict, sim_matrix, r, top_k)
            two_list.append(recommend_list_two)
            two_list_matrix_index.append(recommend_list_two_index)
            # end: main loop

    # # add driving-time info and drop doctors whose hospitals are outside the allowed range;
    # # doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration]]
    # # people_hospital_duration: {"hos_name": duration}
    # doc_info_un2, people_hospital_duration = recommend_server.filter_by_duration(people, doc_info_un, 4)
    # print "Number of doctors:", len(doc_info_un2)
    # # predicted (filled-in) scores, {"hos_id": point}
    # forecast_socre = recommend_server.matrix_score_by_disease(disease, table_name, matrix)
    # print "Filled-matrix scores:", forecast_socre
    # # attach scores to get the final doctor info for this disease;
    # # doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration,socre]]
    # doc_info = recommend_server.add_score(doc_info_un2, forecast_socre)
    # print "Number of doctors:", len(doc_info)
    # # ls_sort: [index], sorted by weight in descending order
    # ls_sort = recommend_server.weighth_with_duration(doc_info)
    # print "Number of doctors sorted by weight (descending):", len(ls_sort)
    # # doc_id_dict:{"doc_id":index}
    # doc_id_dict = greedy.get_doc_id_dict(table_name)
    # sim_matrix = get_sim_matrix(matrix)
    # # total number of hospitals
    # hos_type_sum = recommend_server.static_type_sum(doc_info, 2)
    # # threshold
    # r = 0.0004
    # # top_k
    # k = 20
    # print "ls_sort length:", len(ls_sort)
    # recommend_list_one, recommend_list_two_index = greedy.greedy_one(ls_sort, doc_info, doc_id_dict, hos_type_sum, sim_matrix, r, k)
    # print "two====================================two"
    # recommend_list_two, recommend_list_two_index = greedy.greedy_two(ls_sort, doc_info, doc_id_dict, hos_type_sum, sim_matrix, r, k)
    # # end: main loop

    # start: output results
    fu.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/mmr_recommend_" + str(ratio) + ".txt",
               mmr_list)
    fu.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/mmr_recommend_matrix_index_" + str(ratio) + ".txt",
               mmr_list_matrix_index)
    np.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/two_recommend_" + str(ratio) + ".txt",
               np.array(two_list,dtype = int),
               fmt='%d')
    np.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/two_recommend_matrix_index_" + str(ratio) + ".txt",
               np.array(two_list_matrix_index,dtype = int),
               fmt='%d')
    start_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    mmr_diversity = system_diversity(mmr_list_matrix_index, table_name)
    two_diversity = system_diversity(two_list_matrix_index, table_name)

    log.start_log(start_time,
                  "../matrix/" + table_name[3:].decode('utf-8') + "/" + "推荐排序结果.txt".decode('utf-8'))
    f = log.write_log()
    print >> f, "recommend_list_one:", recommend_list_mmr
    for i in range(len(recommend_list_mmr)):
        # print "int(recommend_list[i])",recommend_list_one[i]
        print >> f, ("%-25s%-25s" % (doc_info[int(recommend_list_mmr[i])][2].encode('utf-8'),
                                     doc_info[int(recommend_list_mmr[i])][1].encode('utf-8')))
    print >> f, "=================================================="
    print >> f, "recommend_list_two:", recommend_list_two
    for j in range(len(recommend_list_two)):
        # print "int(recommend_list[i])",recommend_list_two[i]
        print >> f, ("%-25s%-20s" % (doc_info[int(recommend_list_two[j])][2].encode('utf-8'),
                                     doc_info[int(recommend_list_two[j])][1].encode('utf-8')))
    # print >> f, ("diversity:%-20s" % (two_diversity))
    # print ("diversity:%-20s" % (two_diversity))
    # print >> f, ("diversity:%-20s" % (one_diversity))
    # print ("diversity:%-20s" % (one_diversity))
    print ("diversity:%-20s%-20s" % (mmr_diversity, two_diversity))
    print >> f, ("diversity:%-20s%-20s" % (mmr_diversity,two_diversity))
    log.close_log()
    # end: output results
    return mmr_diversity, two_diversity
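
mmr.MMR is not shown in these examples. Maximal Marginal Relevance re-ranking greedily picks, at each step, the candidate maximizing lambda * relevance - (1 - lambda) * (max similarity to the items already chosen). A minimal sketch over doc_info-style rows (predicted score in column 6) and the doctor similarity matrix, assuming doc_id_dict maps doc_id to a sim_matrix row index, might look like this; it is a generic illustration, not the project's implementation.

def mmr_sketch(doc_info, doc_id_dict, sim_matrix, lam, top_k):
    """Greedy MMR re-ranking (illustrative sketch).

    doc_info rows are assumed to be [doc_id, doc_name, hos_name, rank,
    hos_locat, duration, socre]; doc_id_dict maps doc_id -> sim_matrix index.
    Returns the chosen doc_info positions and the matching matrix indices,
    mirroring the (recommend_list, recommend_list_index) pair used above.
    """
    candidates = list(range(len(doc_info)))
    selected, selected_matrix_index = [], []
    while candidates and len(selected) < top_k:
        best, best_value = None, None
        for i in candidates:
            relevance = float(doc_info[i][6])
            if selected_matrix_index:
                # similarity to the closest already-selected item
                redundancy = max(sim_matrix[doc_id_dict[doc_info[i][0]]][m]
                                 for m in selected_matrix_index)
            else:
                redundancy = 0.0
            value = lam * relevance - (1 - lam) * redundancy
            if best_value is None or value > best_value:
                best, best_value = i, value
        candidates.remove(best)
        selected.append(best)
        selected_matrix_index.append(doc_id_dict[doc_info[best][0]])
    return selected, selected_matrix_index

With lam close to 1 the list is driven almost entirely by the predicted score; lowering it trades relevance for diversity, which appears to be what greedy_Lambda controls in the calls above.
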
Example #7
            timeandfroms.append(time.a.get_text())
    return timeandfroms




max_try = 3
prefix_url = 'http://guangdiu.com/index.php?p='
host_url = 'http://guangdiu.com/'
count_per_page = 30
database_name = 'Guangdiu'
collection_Items = 'Items'
collection_Errors = 'Errors'
collection_push = 'Push'

if __name__ == '__main__':
    # initialize the two database collections and the log file descriptor
    col_Items, col_Errors = init(database_name, collection_Items, collection_Errors)
    # initialize the push collection
    col_push = init_push(database_name, collection_push)
    if col_Items is None or col_Errors is None:
        log.log_prn('Get', 'Get Collections Failed', 'ERROR')
    else:
        log.log_prn('Get', 'Init end.', 'NORMAL')
        while True:
            get_pages(1)
            log.log_prn("Get", "Waiting for next scan...", "NORMAL")
            op_time.delay(60, 10)  # fetch roughly every 60 seconds

        log.close_log()
Example #8
def getresult(l, table_name, lambdas, ab, simlambda, lr, step, sim_k, ratio):
    MAE = 0
    RMSE = 0
    alpha = ab[0]
    beta = ab[1]
    start_time = time.strftime('%Y-%m-%d-%H-%M-%S',
                               time.localtime(time.time()))
    log.start_log(
        start_time, "../matrix/" + table_name.decode('utf-8') + "/" +
        "矩阵分解结果.txt".decode('utf-8'))
    f = log.write_log()

    lc_table_name = 'lc_' + table_name
    tp_table_name = 'tp_' + table_name
    # start: prediction section
    # # the returned matrix has had values masked out
    # C, original_matrix, changed_zero = dm.getMatrix(tp_table_name, ratio)
    C, original_matrix, changed_zero = dm.get_Matrix_from_lc_tp(
        lc_table_name, tp_table_name, ratio, 1)
    # C = np.array(C)
    d = C.shape
    U = np.random.rand(d[0], l)
    V = np.random.rand(d[1], l)
    print "Starting matrix factorization"
    matrix, X, Y, loss = de.matrix_factorization(C, U, V, lambdas, step, alpha,
                                                 beta, simlambda, lr, sim_k,
                                                 tp_table_name)
    # start validation
    print "Starting validation"
    matrix0, pre_or_mat, num = de.norma_matrix(matrix, original_matrix)
    MAE, RMSE = vali.validate(matrix, original_matrix, changed_zero)
    # # end
    # end: prediction section

    file_path = "../matrix/" + table_name.decode('utf-8')
    t = str(ratio) + "_" + str(num) + "_" + start_time + ".txt"
    # start: save the factorized matrix
    np.savetxt(file_path + "/matrix_factorization/matrix_factorization_" +
               str(ratio) + ".txt",
               matrix,
               fmt='%.8f')
    # end: save the factorized matrix
    # start: save the original matrix filled in with predicted values
    # filematrix0 = open(file_path + "/result/pre_" + str(ratio) + "." + "txt", 'w');
    # filematrix0.close()
    np.savetxt(file_path + "/result/pre_" + str(ratio) + ".txt",
               pre_or_mat,
               fmt='%.8f')
    # end: save the original matrix filled in with predicted values
    # start: save the post-processed factorized matrix (negatives set to 0)
    # filematrix1 = open(file_path + "/out/" + t.decode('utf-8'), 'w');
    # filematrix1.close()
    np.savetxt(file_path + "/out/" + t.decode('utf-8'), matrix0, fmt='%.8f')
    # end: save the post-processed factorized matrix (negatives set to 0)
    # end

    # k, disease, mitrax, table_name
    # top_k = dv.getTopK(k, disease, matrix, table_name)
    # filter hospitals: keep those in the same city as the target person
    # pre_top = dv.pre_data(top_k)
    # filter hospitals
    # matrix = dealtxt.load_file_to_array("../matrix/" + table_name.decode('utf-8') + "/result/pre_" + str(ratio) + ".txt");
    # filter_hos = dv.filter_hos_By_sorted100(table_name,matrix,disease,people_locat)
    # dv.getResult(pre_top,disease)
    # dv.getResult1(disease,matrix,table_name,city,people_locat)
    end_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
    print >> f, "Start time:", start_time
    print >> f, "End time:", end_time
    # plot the gradient-descent progress
    title = 'lr:{0} alpha:{1} beta:{2} step:{3} lambdas:{4} sim:{5} sim_k:{6}'
    title = title.format(lr, ab[0], ab[1], step, lambdas, simlambda, sim_k)
    print >> f, "Parameters:", title
    figure_path = "../matrix/" + table_name.decode('utf-8') + "/figure/" + str(
        ratio) + "_" + end_time + ".jpg"
    figure.paint1(X, Y, title, figure_path)
    log.close_log()
    return MAE, RMSE, loss
def caculate_for_diversity(table_name,
                           ratio,
                           r=0.0004,
                           top_k=20,
                           greedy_Lambda=0.6):
    """
    多样性实验
    :param table_name:
    :param ratio:
    :param r:
    :param top_k:
    :return:
    """
    # Shanghai Jiao Tong University, Hefei University of Technology (old campus), ...
    # people = ['31.031583,121.442614','31.849273,117.302611','40.011006,116.338897']
    # diseases = ['高血压','','']
    # load the predicted (filled-in) matrix for the given ratio
    matrix = dealtxt.load_file_to_array("../matrix/" +
                                        table_name[3:].decode('utf-8') +
                                        "/result/pre_" + str(ratio) + ".txt")
    recommend_server = re_div.Recommend_Server()
    final_hos_rank = recommend_server.filter_hos_by_sorted(table_name)
    # doctor info with the final hospital ranking attached, but without predicted scores; doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat]]
    doc_info_un = recommend_server.get_doc_info(table_name, final_hos_rank)

    # start: main loop
    diseases = recommend_server.get_disease(table_name)
    if table_name[3:] == '神经内科':  # the neurology-department table
        sql = 'select  *  from seven20_university_b'
        peoples = recommend_server.get_university_sql(
            table_name, sql)  # peoples:[[name,latlng]...]
    else:
        sql = 'select  *  from seven20_university_a'
        peoples = recommend_server.get_university_sql(
            table_name, sql)  # peoples:[[name,latlng]...]
    # peoples = recommend_server.get_university(table_name)#peoples:[[name,latlng]...]
    random_disease = mathn.get_random_sort(diseases)

    save_random_disease = []  # used to store intermediate data
    save_random_people = []  # used to store intermediate data
    original_list = []
    original_list_matrix_index = []
    two_list = []
    two_list_matrix_index = []
    greedy2_list = []
    greedy2_list_matrix_index = []
    swap2_list = []
    swap2_list_matrix_index = []
    dum_list = []
    dum_list_matrix_index = []
    mmr_list = []
    mmr_list_matrix_index = []
    vns_list = []
    vns_list_matrix_index = []

    random_diss = random_disease[:20]
    for chosen_disease in random_diss:
        un_peoples = mathn.get_random_sort(peoples)[:100]
        save_random_people.append(un_peoples)  # store intermediate data
        for chosen_people in un_peoples:
            # add driving-time info and drop doctors whose hospitals are outside the allowed range;
            # doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration]]
            # people_hospital_duration: {"hos_name": duration}
            doc_info_un2, people_hospital_duration = recommend_server.filter_by_sql_duration(
                chosen_people, doc_info_un)
            # doc_info_un2, people_hospital_duration = recommend_server.filter_by_duration(chosen_people, doc_info_un)
            if len(doc_info_un2) < top_k:
                ul.write_log('../matrix/systme_diversity_wrong_log.txt',
                             chosen_people.encode('utf-8'))
            # print "医生数量:", len(doc_info_un2)
            # 得到预测填充后的评分,{"hos_id":point}
            forecast_socre = recommend_server.matrix_score_by_disease(
                chosen_disease, table_name, matrix)
            # print "填充矩阵评分:", forecast_socre
            # 添加评分,得到对应疾病最终的医生信息, doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration,socre]]
            doc_info = recommend_server.add_score(doc_info_un2, forecast_socre)
            # print "医生数量:", len(doc_info)
            # ls_sort:[index]按weight降序排列后的,doc_info:[[doc_id,doc_name,hos_name,rank,hos_locat,duration,socre,utility]]
            ls_sort, doc_info = recommend_server.weighth_with_duration(
                doc_info)
            # print "按weight降序医生数量:", len(ls_sort)

            # doc_id_dict:{"doc_id":index}
            # doc_id_dict = greedy.get_doc_id_dict(table_name)
            doc_id_dict, doc_info_matrix_dict, doc_matrix_info_dict = greedy.get_doc_index_dict(
                table_name, doc_info)
            doc_list = get_doc_info_list_m(doc_info_matrix_dict)

            sim_matrix = get_sim_matrix(matrix)

            # print "origin====================================origin"
            recommend_list_original = ls_sort[:top_k]
            recommend_list_original_index = greedy.sort_by_matrix(
                recommend_list_original, doc_id_dict, doc_info)
            original_list.append(recommend_list_original)
            original_list_matrix_index.append(recommend_list_original_index)

            # print "two====================================two"
            recommend_list_two, recommend_list_two_index = greedy.greedy_two(
                ls_sort, doc_info, doc_id_dict, sim_matrix, r, top_k)
            two_list.append(recommend_list_two)
            two_list_matrix_index.append(recommend_list_two_index)

            # print "ls_sort数量:", len(ls_sort)
            recommend_list_greedy2, recommend_list_greedy2_index = greedy.greedy_b2(
                ls_sort, doc_info, doc_id_dict, doc_info_matrix_dict,
                doc_matrix_info_dict, sim_matrix, r, top_k)
            greedy2_list.append(recommend_list_greedy2)
            greedy2_list_matrix_index.append(recommend_list_greedy2_index)

            # print "swap_b2====================================swap_b2"
            recommend_list_swap2, recommend_list_swap2_index = greedy.swap_b2(
                ls_sort, doc_info, doc_id_dict, doc_info_matrix_dict,
                doc_matrix_info_dict, sim_matrix, r, top_k)
            swap2_list.append(recommend_list_swap2)
            swap2_list_matrix_index.append(recommend_list_swap2_index)

            recommend_list_dum, recommend_list_dum_index = greedy.dum(
                ls_sort, doc_info, doc_id_dict, sim_matrix, top_k)
            dum_list.append(recommend_list_dum)
            dum_list_matrix_index.append(recommend_list_dum_index)

            # print "MMR====================================MMR"
            recommend_list_mmr, recommend_list_mmr_index = mmr.MMR(
                doc_info, doc_id_dict, sim_matrix, greedy_Lambda, top_k)
            mmr_list.append(recommend_list_mmr)
            mmr_list_matrix_index.append(recommend_list_mmr_index)
            # print "VNS====================================VNS"
            recommend_list_vns, recommend_list_vns_index = vns.vns(
                doc_info, doc_id_dict, sim_matrix, top_k)
            vns_list.append(recommend_list_vns)
            vns_list_matrix_index.append(recommend_list_vns_index)

    # start: output results
    # store intermediate data
    fu.savetxt_li("../matrix/" + table_name[3:].decode('utf-8') +
                  "/save_random_disease" + str(ratio) + ".txt",
                  random_diss,
                  chinese=True)  # write out the matching doctor matrix, save_random_people
    fu.savetxt_three_dia("../matrix/" + table_name[3:].decode('utf-8') +
                         "/save_random_people" + str(ratio) + ".txt",
                         save_random_people,
                         chinese=True)
    # fu.savetxt(
    #     "../matrix/" + table_name[3:].decode('utf-8') + "/original_recommend_matrix_index_" + str(ratio) + ".txt",
    #     original_list_matrix_index)
    # fu.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/one_recommend_matrix_index_" + str(ratio) + ".txt",
    #            one_list_matrix_index)
    # np.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/two_recommend_matrix_index_" + str(ratio) + ".txt",
    #            np.array(two_list_matrix_index, dtype=int),
    #            fmt='%d')
    # fu.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/mmr_recommend_matrix_index_" + str(ratio) + ".txt",
    #            mmr_list_matrix_index)
    # fu.savetxt("../matrix/" + table_name[3:].decode('utf-8') + "/vns_recommend_matrix_index_" + str(ratio) + ".txt",
    #            vns_list_matrix_index)

    start_time = time.strftime('%Y-%m-%d-%H-%M-%S',
                               time.localtime(time.time()))
    # compute system-level diversity
    original_diversity = system_diversity(original_list_matrix_index,
                                          table_name)
    two_diversity = system_diversity(two_list_matrix_index, table_name)
    mmr_diversity = system_diversity(mmr_list_matrix_index, table_name)
    vns_diversity = system_diversity(vns_list_matrix_index, table_name)
    greedy2_diversity = system_diversity(greedy2_list_matrix_index, table_name)
    swap2_diversity = system_diversity(swap2_list_matrix_index, table_name)
    dum_diversity = system_diversity(dum_list_matrix_index, table_name)

    log.start_log(
        start_time, "../matrix/" + table_name[3:].decode('utf-8') + "/" +
        "推荐排序结果.txt".decode('utf-8'))
    f = log.write_log()

    print >> f, "recommend_list_original:", recommend_list_original
    for i in range(len(recommend_list_original)):
        # print "int(recommend_list[i])",recommend_list_one[i]
        print >> f, (
            "%-25s%-25s" %
            (doc_info[int(recommend_list_original[i])][2].encode('utf-8'),
             doc_info[int(recommend_list_original[i])][1].encode('utf-8')))
    print >> f, "=================================================="
    # print >> f, "recommend_list_one:", recommend_list_one
    # for i in range(len(recommend_list_one)):
    #     # print "int(recommend_list[i])",recommend_list_one[i]
    #     print >> f, ("%-25s%-25s" % (doc_info[int(recommend_list_one[i])][2].encode('utf-8'),
    #                                  doc_info[int(recommend_list_one[i])][1].encode('utf-8')))
    # print >> f, "=================================================="
    # print >> f, "recommend_list_two:", recommend_list_two
    # for j in range(len(recommend_list_two)):
    #     # print "int(recommend_list[i])",recommend_list_two[i]
    #     print >> f, ("%-25s%-20s" % (doc_info[int(recommend_list_two[j])][2].encode('utf-8'),
    #                                  doc_info[int(recommend_list_two[j])][1].encode('utf-8')))
    print >> f, "=================================================="
    print >> f, "recommend_list_mmr:", recommend_list_mmr
    for i in range(len(recommend_list_mmr)):
        # print "int(recommend_list[i])",recommend_list_one[i]
        print >> f, ("%-25s%-25s" %
                     (doc_info[int(recommend_list_mmr[i])][2].encode('utf-8'),
                      doc_info[int(recommend_list_mmr[i])][1].encode('utf-8')))
    print >> f, "=================================================="
    # print >> f, "recommend_list_vns:", recommend_list_vns
    # for i in range(len(recommend_list_vns)):
    #     # print "int(recommend_list[i])",recommend_list_one[i]
    #     print >> f, ("%-25s%-25s" % (doc_info[int(recommend_list_vns[i])][2].encode('utf-8'),
    #                                  doc_info[int(recommend_list_vns[i])][1].encode('utf-8')))
    # print >> f, "=================================================="
    print >> f, "recommend_list_swap2:", recommend_list_swap2
    for i in range(len(recommend_list_swap2)):
        # print "int(recommend_list[i])",recommend_list_one[i]
        print >> f, (
            "%-25s%-25s" %
            (doc_info[int(recommend_list_swap2[i])][2].encode('utf-8'),
             doc_info[int(recommend_list_swap2[i])][1].encode('utf-8')))
    print >> f, "=================================================="

    print(("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" %
           (ratio, original_diversity, two_diversity, greedy2_diversity,
            swap2_diversity, dum_diversity, mmr_diversity, vns_diversity)))
    print >> f, (
        "%-20s%-20s%-20s%-20s%-20s%-20s%-20s%-20s" %
        (ratio, original_diversity, two_diversity, greedy2_diversity,
         swap2_diversity, dum_diversity, mmr_diversity, vns_diversity))
    log.close_log()
    # end: output results
    return original_diversity, two_diversity, greedy2_diversity, swap2_diversity, dum_diversity, mmr_diversity, vns_diversity
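
system_diversity is also not shown in these examples. One common way to score recommendation diversity, and only a guess at what it computes here, is the average pairwise dissimilarity (1 - similarity) within each recommended list, averaged over all lists. A sketch over the *_matrix_index lists, assuming access to the same sim_matrix (the real function takes table_name instead), purely illustrative:

def system_diversity_sketch(lists_matrix_index, sim_matrix):
    """Average intra-list diversity over all recommendation lists (sketch).

    Each element of lists_matrix_index is a list of sim_matrix row indices.
    Diversity of one list is the mean pairwise (1 - similarity); the system
    value is the mean over all lists.
    """
    per_list = []
    for rec in lists_matrix_index:
        pairs, total = 0, 0.0
        for a in range(len(rec)):
            for b in range(a + 1, len(rec)):
                total += 1.0 - sim_matrix[rec[a]][rec[b]]
                pairs += 1
        if pairs:
            per_list.append(total / pairs)
    return sum(per_list) / len(per_list) if per_list else 0.0
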