def __init__(self, apar, cpar, session, asr):
    Privacy.__init__(self, apar, cpar, session, asr)

    self.animationTag = self.HIDE_FLOOR_TURN_TAG
    self.angle = self.ANGLE_LOOK_DOWN

    # Parse the action parameters
    self.apar = self.apar.split(' ')

    # Parse the cultural parameters
    self.cpar = self.cpar.split(' ')
    self.volume = float(self.cpar[0])
    self.speed = float(self.cpar[1])
    self.pitch = float(self.cpar[2])
    self.language = self.cpar[3].lower().replace('"', '')
    self.username = self.cpar[4].replace('"', '')

    caressestools.Language.setLanguage(self.language)
    self.setLang(self.language)

    caressestools.setRobotLanguage(self.session, caressestools.Language.lang_naoqi)
    caressestools.setVoiceVolume(self.session, self.volume)
    caressestools.setVoiceSpeed(self.session, self.speed)
    caressestools.setVoicePitch(self.session, self.pitch)
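# Illustrative only: the parsing above implies a space-separated cultural-parameter
# string of the form '<volume> <speed> <pitch> "<language>" "<username>"'. The sample
# values below are hypothetical and only show how the fields are split and converted.
_example_cpar = '50.0 80.0 1.0 "english" "John"'.split(' ')
assert float(_example_cpar[0]) == 50.0
assert _example_cpar[3].lower().replace('"', '') == 'english'
assert _example_cpar[4].replace('"', '') == 'John'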
def test_membership_inference_torfi_mismatch(self):
    n = 10000
    m = 17
    missing_value = -999999
    pri = Privacy()

    header = []
    for i in range(m):
        header = np.append(header, 'col' + str(i))

    # Create dummy datasets where the synthetic data is mismatched with the real
    # data, so membership disclosure risk should be low: the attack should not be
    # able to separate training records from held-out records.
    r_trn = np.random.normal(loc=0, size=(n, m))
    r_tst = np.random.normal(loc=0, size=(n, m))
    s = np.random.normal(loc=10, size=(n, m))

    res_mi = pri.membership_inference(mat_f_r_trn=r_trn, mat_f_r_tst=r_tst, mat_f_s=s,
                                      header=header, missing_value=missing_value,
                                      mi_type='torfi', n_cpu=1)

    avg_p_trn = np.mean(res_mi['prob'][np.where(res_mi['label'] == 1)])
    avg_p_tst = np.mean(res_mi['prob'][np.where(res_mi['label'] == 0)])
    assert np.allclose(avg_p_trn, avg_p_tst, atol=0.05)
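# A minimal sketch of a distance-based membership inference attack, for intuition
# only; it is not necessarily what mi_type='torfi' implements. Records whose nearest
# synthetic neighbor is close receive a higher membership score, and an attack has
# signal only if training records score systematically higher than held-out records.
import numpy as np
from scipy.spatial.distance import cdist

def membership_scores_sketch(real, synth):
    """Score each real record by proximity to its nearest synthetic record."""
    d_min = cdist(real, synth, metric='euclidean').min(axis=1)
    return 1.0 / (1.0 + d_min)  # closer synthetic neighbor -> higher score

rng = np.random.default_rng(0)
trn, tst = rng.normal(0, 1, (200, 5)), rng.normal(0, 1, (200, 5))
synth = rng.normal(10, 1, (200, 5))  # mismatched synthetic data, as in the test above
print(membership_scores_sketch(trn, synth).mean(),
      membership_scores_sketch(tst, synth).mean())  # roughly equal -> low risk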
def test_distance_euclidean(self):
    pri = Privacy()
    metric = 'euclidean'
    a = np.array([[1, 1]])
    b = np.array([[2, 1]])
    d = pri.distance(arr1=a, arr2=b, metric=metric)
    assert d == 1
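# A minimal sketch of what a pairwise distance helper like Privacy.distance could
# look like, assuming it wraps scipy's cdist; the actual implementation may differ.
import numpy as np
from scipy.spatial.distance import cdist

def distance_sketch(arr1, arr2, metric='euclidean'):
    """Return the pairwise distance(s) between rows of arr1 and arr2."""
    return cdist(np.atleast_2d(arr1), np.atleast_2d(arr2), metric=metric)

assert distance_sketch([[1, 1]], [[2, 1]], metric='euclidean')[0, 0] == 1.0
assert distance_sketch([[1, 1]], [[1, 1]], metric='hamming')[0, 0] == 0.0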
def test_nearest_neighbor(self):
    threshold = 1e-5
    metric = "euclidean"
    x = np.array([[1, 1], [4, 4], [5, 4]])
    pri = Privacy()
    nn_dist = pri.nearest_neighbors(arr1=x, metric=metric)
    assert abs(nn_dist[0] - pri.distance(x[0, :], x[1, :], metric)) < threshold
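# A minimal sketch of within-set nearest-neighbor distances, excluding each point's
# zero distance to itself; this assumes scikit-learn is available and is not
# necessarily how Privacy.nearest_neighbors is implemented.
import numpy as np
from sklearn.neighbors import NearestNeighbors

def nearest_neighbors_sketch(arr1, metric='euclidean'):
    """Distance from each row of arr1 to its closest other row."""
    nn = NearestNeighbors(n_neighbors=2, metric=metric).fit(arr1)
    dist, _ = nn.kneighbors(arr1)
    return dist[:, 1]  # column 0 is the self-distance (0), column 1 is the true NN

x = np.array([[1, 1], [4, 4], [5, 4]])
print(nearest_neighbors_sketch(x))  # e.g. [4.243..., 1.0, 1.0]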
def test_distance_hamming(self):
    pri = Privacy()
    metric = 'hamming'
    a = np.array([[1, 1]])
    b = np.array([[1, 1]])
    d = pri.distance(arr1=a, arr2=b, metric=metric)
    assert d == 0
def test_assess_memorization(self):
    n = 1000
    m = 3
    missing_value = -999999
    pri = Privacy()

    header = []
    for i in range(m):
        header = np.append(header, 'col' + str(i))

    x_real = np.random.random(size=(n, m))
    x_synth = np.random.random(size=(n, m))
    res = pri.assess_memorization(mat_f_r=x_real, mat_f_s=x_synth,
                                  missing_value=missing_value, header=header,
                                  metric='euclidean', debug=False)
    assert np.mean(res['real']) < np.mean(res['rand'])
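# A minimal sketch of one way to assess memorization: compare nearest-neighbor
# distances from real records to the synthetic data against distances from real
# records to a noise baseline. Unusually small real-to-synthetic distances suggest
# copying. This is an illustration, not the library's assess_memorization.
import numpy as np
from scipy.spatial.distance import cdist

def memorization_sketch(x_real, x_synth, metric='euclidean'):
    # Hypothetical baseline: noise sampled uniformly within each real column's range.
    lo, hi = x_real.min(axis=0), x_real.max(axis=0)
    x_rand = np.random.uniform(lo, hi, size=x_synth.shape)
    d_real = cdist(x_real, x_synth, metric=metric).min(axis=1)
    d_rand = cdist(x_real, x_rand, metric=metric).min(axis=1)
    return {'real': d_real, 'rand': d_rand}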
        rea.save_obj(res, file_name=outfile)
        outfile = args.outprefix_realism + '_' + args.analysis_realism + '.pdf'
        rea.plot(res, file_pdf=outfile)
        msg = rea.summarize(res)
        print(msg)
    else:
        print('Error: do not recognize output_realism option ' + args.output_realism)
        sys.exit(0)

elif args.task == 'privacy':
    pri = Privacy()
    pre = Preprocessor(missing_value=args.missing_value_privacy)
    r_trn = pre.read_file(args.file_privacy_real_train)
    r_tst = pre.read_file(args.file_privacy_real_test)
    s = pre.read_file(args.file_privacy_synth)

    # Subsample each dataset so the privacy analysis stays tractable
    if args.sample_privacy < len(s['x']):
        idx = np.random.choice(range(len(s['x'])), args.sample_privacy, replace=False)
        s['x'] = s['x'][idx, :]
    if args.sample_privacy < len(r_trn['x']):
        idx = np.random.choice(range(len(r_trn['x'])), args.sample_privacy, replace=False)
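# Hypothetical invocation of the privacy task above, assuming the script is named
# evaluate.py and that the argparse flags mirror the attribute names used in the
# snippet (both are assumptions; check the script's actual argument parser):
#
#   python evaluate.py --task privacy \
#       --file_privacy_real_train real_train.csv \
#       --file_privacy_real_test real_test.csv \
#       --file_privacy_synth synth.csv \
#       --missing_value_privacy -999999 \
#       --sample_privacy 10000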