# --- Discriminator / rollout hyperparameters ---
d_lr = 1e-3              # discriminator learning rate
n_sample = 1             # number of Monte Carlo search rollouts
generate_samples = 2000  # number of generated sentences

# --- Pretraining parameters ---
g_pre_lr = 1e-3
d_pre_lr = 1e-3
g_pre_epochs = 60
d_pre_epochs = 1

trainer = Trainer(
    rf_system, B, T, n_authorized, g_H, d_dropout,
    g_lr=g_lr,
    d_lr=d_lr,
    n_sample=n_sample,
    generate_samples=generate_samples,
)

# Pool authorized signals together with adversarial impersonation signals.
# Impersonation samples are tagged with transmitter id `n_authorized`
# (a value no authorized transmitter uses — TODO confirm against txid_auth).
sig_rd = np.concatenate([sig_auth, sig_impersonate_ad])
txid_rd = np.concatenate([
    txid_auth,
    np.ones((sig_impersonate_ad.shape[0],)) * n_authorized,
])
sig_rd, txid_rd = shuffle(sig_rd, txid_rd)

# Binary labels for the discriminator: 1 = authorized, 0 = impersonator.
is_impersonator = txid_rd == n_authorized
txid_disc = np.invert(is_impersonator)
txid_disc = txid_disc.astype(int)
# --- Discriminator / rollout hyperparameters ---
d_dropout = 0.5          # dropout ratio
d_lr = 1e-3              # discriminator learning rate
n_sample = 1             # number of Monte Carlo search rollouts
generate_samples = 2000  # number of generated sentences

# --- Pretraining parameters ---
g_pre_lr = 1e-3
d_pre_lr = 1e-3
g_pre_epochs = 60
d_pre_epochs = 1

# Resume the discriminator from a previously saved session checkpoint.
d_load_path = rf_system.full_sess_dir + "/discriminator%s.json" % suffix

trainer = Trainer(
    rf_system, B, T, n_authorized, g_H, d_dropout,
    g_lr=g_lr,
    d_lr=d_lr,
    n_sample=n_sample,
    generate_samples=generate_samples,
    d_load_path=d_load_path,
)

# Pool authorized signals together with adversarial impersonation signals.
# Impersonation samples are tagged with transmitter id `n_authorized`
# (a value no authorized transmitter uses — TODO confirm against txid_auth).
sig_rd = np.concatenate([sig_auth, sig_impersonate_ad])
txid_rd = np.concatenate([
    txid_auth,
    np.ones((sig_impersonate_ad.shape[0],)) * n_authorized,
])
sig_rd, txid_rd = shuffle(sig_rd, txid_rd)

# Binary labels for the discriminator: 1 = authorized, 0 = impersonator.
is_impersonator = txid_rd == n_authorized
txid_disc = np.invert(is_impersonator)
txid_disc = txid_disc.astype(int)

# --- Train / validation / test split sizes ---
test_frac = 0.1
valid_frac = 0.2
n_samples = sig_rd.shape[0]
n_test = int(test_frac * n_samples)