import numpy as np
import torch

# assumes RBM, init_global_model and evaluate_global_model are defined elsewhere in the project
def FederatedLearning(params, train_data, test_set, train_set_item):
    print('Federated Learning with local RBM')
    print('\t Initialize global model ......')
    global_model_weights, global_model_visible_bias, global_model_hidden_bias = \
        init_global_model((params.rbm_visible_unit, params.rbm_hidden_unit))

    # load the train data
    rec_batch = 64
    train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=rec_batch)

    precision_each_round, recall_each_round, f_measure_each_round, ndcg_each_round, hit_num_each_round = [], [], [], [], []

    print('\t Start Federated Learning communication round ......')
    for r in range(params.max_communication_round):
        print('\t \t Communication round %d training:' % r)
        # randomly sample clients: shuffle the user indices and keep the first c * N
        perm = np.random.permutation(params.client_num)
        sample_user_num = int(params.c * params.client_num)
        sample_user = perm[:sample_user_num].tolist()

        weight_accountant, visible_bias_accountant, hidden_bias_accountant = [], [], []
        error_accountant = []
        for u in sample_user:
            # pad the single user's record into a batch-shaped buffer (only row 0 is filled)
            u_train_data = torch.zeros([params.b, params.rbm_visible_unit])
            u_train_data[0] = train_data[u]
            # download global model and initialize local RBM
            u_rbm = RBM(params.rbm_visible_unit, params.rbm_hidden_unit, params.rbm_k,
                        global_model_weights, global_model_visible_bias, global_model_hidden_bias,
                        use_cuda=params.CUDA)
            if params.CUDA:
                u_train_data = u_train_data.cuda()
            for epoch in range(params.e):
                # run params.e local epochs; keep the reconstruction error of the last one
                epoch_error = u_rbm.contrastive_divergence(u_train_data)
            error_accountant.append(epoch_error)
            weight_accountant.append(u_rbm.weights)
            visible_bias_accountant.append(u_rbm.visible_bias)
            hidden_bias_accountant.append(u_rbm.hidden_bias)
        print('\t \t update the global model')
        # FedAvg-style aggregation: simple average of the sampled clients' parameters
        global_model_weights = sum(weight_accountant) / sample_user_num
        global_model_visible_bias = sum(visible_bias_accountant) / sample_user_num
        global_model_hidden_bias = sum(hidden_bias_accountant) / sample_user_num

        print('\t \t evaluate the global model')
        print('********************')
        print('The total reconstruction error over sampled clients is %0.4f' % sum(error_accountant))
        precision_mean, recall_mean, f_measure_mean, ndcg_mean, hit_num_mean = evaluate_global_model(
            global_model_weights, global_model_visible_bias, global_model_hidden_bias,
            params.client_num, params.rbm_visible_unit, train_data_loader, test_set, train_set_item,
            params.CUDA, rec_batch)

        precision_each_round.append(precision_mean)
        recall_each_round.append(recall_mean)
        f_measure_each_round.append(f_measure_mean)
        ndcg_each_round.append(ndcg_mean)
        hit_num_each_round.append(hit_num_mean)
        print('********************')
    return global_model_weights, global_model_visible_bias, global_model_hidden_bias, \
           precision_each_round, recall_each_round, f_measure_each_round, ndcg_each_round, hit_num_each_round
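
Both federated examples on this page call init_global_model() without showing it. Below is a minimal sketch of what it plausibly returns, assuming small random initial weights and zero biases; the function body and initialization scale are assumptions, not the original code.

import torch

def init_global_model(shape):
    # shape = (visible_units, hidden_units); assumed small random weights, zero biases
    visible_units, hidden_units = shape
    weights = torch.randn(visible_units, hidden_units) * 0.1
    visible_bias = torch.zeros(visible_units)
    hidden_bias = torch.zeros(hidden_units)
    return weights, visible_bias, hidden_bias

Any initialization producing these three shapes would work here, since the RBM constructor above accepts the weights and biases directly.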
Example #2
import numpy as np

# assumes the RBM class plus VISIBLE_UNITS, HIDDEN_UNITS, CD_K, EPOCHS, BATCH_SIZE,
# CUDA, train_loader, train_dataset and test_dataset are defined earlier in the script
########## TRAINING RBM ##########
print('Training RBM...')

rbm = RBM(VISIBLE_UNITS, HIDDEN_UNITS, CD_K, use_cuda=CUDA)

for epoch in range(EPOCHS):
    epoch_error = 0.0

    for batch, _ in train_loader:
        batch = batch.view(len(batch), VISIBLE_UNITS)  # flatten input data

        if CUDA:
            batch = batch.cuda()

        batch_error = rbm.contrastive_divergence(batch)

        epoch_error += batch_error

    print('Epoch Error (epoch=%d): %.4f' % (epoch, epoch_error))

########## EXTRACT FEATURES ##########
print('Extracting features...')

train_features = np.zeros((len(train_dataset), HIDDEN_UNITS))
train_labels = np.zeros(len(train_dataset))
test_features = np.zeros((len(test_dataset), HIDDEN_UNITS))
test_labels = np.zeros(len(test_dataset))

for i, (batch, labels) in enumerate(train_loader):
    batch = batch.view(len(batch), VISIBLE_UNITS)  # flatten input data

    if CUDA:
        batch = batch.cuda()

    # completion of the truncated loop, assuming the RBM exposes sample_hidden()
    # returning hidden-unit activations and that train_loader uses BATCH_SIZE
    train_features[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = rbm.sample_hidden(batch).cpu().numpy()
    train_labels[i*BATCH_SIZE:i*BATCH_SIZE+len(batch)] = labels.numpy()
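
The extracted hidden activations are typically fed to a downstream classifier. A minimal sketch, assuming scikit-learn is available and that the extraction loops above have filled train_features/train_labels and test_features/test_labels:

from sklearn.linear_model import LogisticRegression

clf = LogisticRegression(max_iter=1000)
clf.fit(train_features, train_labels)
accuracy = (clf.predict(test_features) == test_labels).mean()
print('Test accuracy: %.4f' % accuracy)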
Example #3
import time

import numpy as np
import torch

# assumes RBM, init_global_model, predict_model, compute_metrice_according_to_rec_list,
# eval_area_metrics and GlobalBest are defined elsewhere in the project
def FederatedLearning(params, train_data, test_set, train_set_item, divide_user, Top_K):
    print('Federated Learning with local RBM')
    print('\t Initialize global model ......')
    global_model_weights, global_model_visible_bias, global_model_hidden_bias = \
        init_global_model((params.rbm_visible_unit, params.rbm_hidden_unit))

    # load the train data
    rec_batch = 64
    train_data_loader = torch.utils.data.DataLoader(train_data, batch_size=rec_batch)

    precision_each_round, recall_each_round, f_measure_each_round, ndcg_each_round, hit_num_each_round = [], [], [], [], []

    area_num = len(divide_user)
    area_best = [GlobalBest(Top_K) for _ in range(area_num)]
    globalbest = GlobalBest(Top_K)

    round_train_time = []
    print('\t Start Federated Learning communication round ......')
    for r in range(params.max_communication_round):
        t0 = time.time()  # communication round r training starts
        print('\t \t Communication round %d training:' % r)
        # randomly sample clients: shuffle the user indices and keep the first c * N
        perm = np.random.permutation(params.client_num)
        sample_user_num = int(params.c * params.client_num)
        sample_user = perm[:sample_user_num].tolist()

        weight_accountant, visible_bias_accountant, hidden_bias_accountant = [], [], []
        error_accountant = []
        # iterate over each sampled client (user); each client's data trains its own local RBM
        for u in sample_user:
            # pad the single user's record into a batch-shaped buffer (only row 0 is filled)
            u_train_data = torch.zeros([params.b, params.rbm_visible_unit])
            u_train_data[0] = train_data[u]
            # download global model and initialize local RBM
            u_rbm = RBM(params.rbm_visible_unit, params.rbm_hidden_unit, params.rbm_k,
                        global_model_weights, global_model_visible_bias, global_model_hidden_bias,
                        use_cuda=params.CUDA)
            if params.CUDA:
                u_train_data = u_train_data.cuda()
            for epoch in range(params.e):
                # run params.e local epochs; keep the reconstruction error of the last one
                epoch_error = u_rbm.contrastive_divergence(u_train_data)
            error_accountant.append(epoch_error)
            weight_accountant.append(u_rbm.weights)
            visible_bias_accountant.append(u_rbm.visible_bias)
            hidden_bias_accountant.append(u_rbm.hidden_bias)

        # model aggregation: FedAvg-style simple average of the sampled clients' parameters
        print('\t \t update the global model')
        global_model_weights = sum(weight_accountant) / sample_user_num
        global_model_visible_bias = sum(visible_bias_accountant) / sample_user_num
        global_model_hidden_bias = sum(hidden_bias_accountant) / sample_user_num
        t1 = time.time()
        round_train_time.append(t1 - t0)
        print('\t \t train time is %0.4f' % (t1 - t0))
        print('\t \t evaluate the global model')
        print('********************')
        print('The total reconstruction error over sampled clients is %0.4f' % sum(error_accountant))

        rec_top_k_list = predict_model(
            global_model_weights, global_model_visible_bias, global_model_hidden_bias,
            params.client_num, params.rbm_visible_unit,
            train_data, train_set_item, params.CUDA, rec_batch)

        pre, rec, f_mea, ndcg, hit_num, _map = compute_metrice_according_to_rec_list(
            rec_top_k_list, test_set, params.rbm_visible_unit, Top_K)
        globalbest.update_best_metrics(Top_K, pre, rec, f_mea, ndcg, hit_num, _map, r)
        for i in range(area_num):
            area_divide_user = divide_user[i]
            pre, rec, f_mea, ndcg, hit_num, _map = eval_area_metrics(
                rec_top_k_list, area_divide_user, params.rbm_visible_unit, test_set, Top_K)
            area_best[i].update_best_metrics(Top_K, pre, rec, f_mea, ndcg, hit_num, _map, r)
        print('********************')

    print('total train time is %0.4f' % sum(round_train_time))
    return area_best, globalbest
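
Example #3 also depends on a GlobalBest helper that is not shown here. Below is a minimal sketch of the interface the code above exercises, assuming each metric arrives as a scalar at a single Top-K cutoff and that only the best value per metric (with its round index) is kept; the real class may instead track per-K lists.

class GlobalBest:
    """Track the best value of each metric across communication rounds."""

    METRIC_NAMES = ('precision', 'recall', 'f_measure', 'ndcg', 'hit_num', 'map')

    def __init__(self, top_k):
        self.top_k = top_k
        self.best_value = {name: float('-inf') for name in self.METRIC_NAMES}
        self.best_round = {name: -1 for name in self.METRIC_NAMES}

    def update_best_metrics(self, top_k, pre, rec, f_mea, ndcg, hit_num, _map, r):
        # keep the per-metric maximum and remember which round produced it
        for name, value in zip(self.METRIC_NAMES, (pre, rec, f_mea, ndcg, hit_num, _map)):
            if value > self.best_value[name]:
                self.best_value[name] = value
                self.best_round[name] = r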