コード例 #1
0
ファイル: utils.py プロジェクト: AlexConnat/MPC-Aggreg
def scalar_add_all(scalars):
    """Securely sum a non-empty sequence of secret-shared scalars.

    Args:
        scalars: non-empty sequence of MPyC secure numbers.

    Returns:
        The secure sum of all elements; a single element is returned
        unchanged (no mpc.add round is spent on it).

    Raises:
        ValueError: if *scalars* is empty (the original code failed with
            an opaque IndexError on ``scalars[0]`` instead).
    """
    if not scalars:
        raise ValueError("scalar_add_all() requires at least one scalar")

    # Seed with the first element and fold the rest in pairwise;
    # mpc.add is binary, so n-1 additions are needed for n inputs.
    total = scalars[0]
    for scalar in scalars[1:]:
        total = mpc.add(total, scalar)
    return total
コード例 #2
0
def unpadded_correlation(image, kernel):
    """Valid-mode (unpadded) 2-D cross-correlation of *image* with *kernel*.

    Products are combined with ``mpc.add`` so the accumulation stays in the
    secure domain; the fold order matches the original row-major unrolling.

    Args:
        image: 2-D array-like of secure (or plain) values, shape (hi, wi).
        kernel: 2-D array with a ``.shape`` attribute, shape (hk, wk).
            The original implementation hard-coded 3x3; this version reads
            the kernel shape, which is backward-compatible for 3x3 inputs.

    Returns:
        numpy array of shape (hi - hk + 1, wi - wk + 1) containing the
        correlation values (previously the width was hard-coded to 98).
    """
    hi, wi = image.shape
    hk, wk = kernel.shape

    out = []
    print("***unpadded_correlation***")
    for row_img in np.arange(0, hi - hk + 1):
        for col_img in np.arange(0, wi - wk + 1):
            # Accumulate kernel*window products row-major, seeding with the
            # first product — identical order to the original unrolled code.
            acc = None
            for dr in range(hk):
                for dc in range(wk):
                    term = kernel[dr, dc] * (image[row_img + dr][col_img + dc])
                    acc = term if acc is None else mpc.add(acc, term)
            out.append(acc)
    out = np.asarray(out)
    # Output width follows the image/kernel sizes instead of the magic 98.
    out = np.reshape(out, (-1, wi - wk + 1))
    return out
コード例 #3
0
 def update_f_m(self, data, trees, iter, learning_rate, logger):
     """Advance the boosted model score column f_{iter} from f_{iter-1}.

     Copies the previous iteration's score column, then for every leaf of
     tree *iter* adds ``learning_rate * leaf.predict_value`` (scaled to a
     fixed-point int) to the rows that fall in that leaf, via ``mpc.add``
     (cipher + plain). Finally logs this iteration's training loss.

     Args:
         data: pandas DataFrame holding a 'label' column and per-iteration
             score columns named 'f_<k>'; mutated in place.
         trees: indexable collection of fitted trees exposing
             ``leaf_nodes`` with ``data_index`` and ``predict_value``.
         iter: current boosting iteration number (>= 1).
         learning_rate: shrinkage factor applied to each leaf prediction.
         logger: sink passed through to ``self.get_train_loss``.
     """
     f_prev_name = 'f_' + str(iter - 1)
     f_m_name = 'f_' + str(iter)
     # Start this iteration's scores from the previous iteration's column.
     data[f_m_name] = data[f_prev_name]
     print(f_m_name)
     print("t;dfd:", type(data[f_m_name].values.tolist()[0]))
     for leaf_node in trees[iter].leaf_nodes:
         # print("xxx:",type(data.loc[leaf_node.data_index, f_m_name]),data.loc[leaf_node.data_index, f_m_name])
         tmp = data.loc[leaf_node.data_index, f_m_name]
         # data.loc[leaf_node.data_index, f_m_name] = mpc.run(mpc.output(tmp.values.tolist()[0])) + learning_rate * leaf_node.predict_value  # cipher and plain
         # Scale the plaintext update to fixed-point before the secure add.
         # NOTE(review): only the first value of the leaf's slice is used as
         # the cipher operand — presumably all rows in a leaf share it; verify.
         tmp1 = scale_to_int(2)(learning_rate * leaf_node.predict_value)
         data.loc[leaf_node.data_index, f_m_name] = mpc.add(tmp.values.tolist()[0], tmp1) # cipher and plain
     # Log the train loss for each tree / iteration.
     self.get_train_loss(data['label'], data[f_m_name], iter, logger)
     print("data f_m_nme type:", type(data[f_m_name].values.tolist()[0]))
コード例 #4
0
def calculate_se(label):  # square error
    """Return the revealed sum of squared errors of *label* about its mean.

    Args:
        label: pandas Series (anything with ``.values.tolist()``) of values
            that ``mpc.sub``/``mpc.mul`` accept (cipher or plain).

    Returns:
        The opened (plaintext) sum of squared deviations, or 0 for an
        empty series.
    """
    values = label.values.tolist()
    if not values:
        return 0

    mean_value = mean(values)
    se = secnum(0)
    # Accumulate (x - mean)^2 entirely in the secure domain.
    for value in values:
        deviation = mpc.sub(value, mean_value)
        se = mpc.add(se, mpc.mul(deviation, deviation))
    # NOTE(review): opening here reveals the SE to all parties; the original
    # TODO ("should return cipher, deal it later") suggests returning `se`
    # and deferring mpc.output to the caller.
    return mpc.run(mpc.output(se))
コード例 #5
0
    # Tail of a command dispatch; the opening `if cmd == 'min':` lies above
    # this chunk. Each branch computes a statistic of the parties' secret
    # inputs `all_sec_vi` securely, opens it with mpc.output, and prints it.
    sec_minimum = mpc.min(all_sec_vi)
    minimum = mpc.run(mpc.output(sec_minimum))
    print(minimum)

elif cmd == 'max':
    # Secure maximum, then reveal.
    sec_maximum = mpc.max(all_sec_vi)
    maximum = mpc.run(mpc.output(sec_maximum))
    print(maximum)

elif cmd == 'mean':
    # Secure sum divided by the public party count, then reveal.
    sec_sum = mpc.sum(all_sec_vi)
    sec_mean = mpc.div(sec_sum, nb_parties)
    mean = mpc.run(mpc.output(sec_mean))
    print(mean)

elif cmd == 'std':
    # Population standard deviation: accumulate squared deviations from the
    # secure mean, reveal the total, then take sqrt in the clear.
    sec_sum = mpc.sum(all_sec_vi)
    sec_mean = mpc.div(sec_sum, nb_parties)
    sec_stddev = mpc.pow(mpc.sub(all_sec_vi[0], sec_mean), 2)
    for v in all_sec_vi[1:]:
        sec_stddev = mpc.add(sec_stddev, mpc.pow(mpc.sub(v, sec_mean), 2))
    stddev = np.sqrt(
        float(mpc.run(mpc.output(sec_stddev))) / float(nb_parties))
    print(stddev)

###############################################################################
print('\n' + '=' * 50)
mpc.run(mpc.shutdown())  #### END THE MPC ROUNDS OF COMPUTATION ####
print('=' * 50)
###############################################################################