import numpy as np
import torch
import torch.nn.functional as F

# HW, device, accuracy and test_model are defined elsewhere in the repository.


def test(x_attack, y_attack, metadata_attack, network, sub_key_index, use_hw=True,
         attack_size=10000, rank_step=10, unmask=False, only_accuracy=False, plain=None):
    # Cut to the correct attack size
    x_attack = x_attack[0:attack_size]
    y_attack = y_attack[0:attack_size]
    metadata_attack = metadata_attack[0:attack_size]
    if unmask:
        y_attack = np.array([
            y_attack[i] ^ metadata_attack[i]['masks'][sub_key_index - 2]
            for i in range(attack_size)
        ])

    # Convert values to Hamming weight if asked for
    if use_hw:
        y_attack = np.array([HW[val] for val in y_attack])

    # Test the model
    with torch.no_grad():
        data = torch.from_numpy(x_attack.astype(np.float32)).to(device)
        print('x_test size: {}'.format(data.cpu().size()))
        if plain is None:
            predictions = F.softmax(network(data), dim=-1)
        else:
            plain = torch.from_numpy(plain.astype(np.float32)).to(device)
            predictions = F.softmax(network(data, plain), dim=-1)

        # Print accuracy
        accuracy(network, x_attack, y_attack, plain=plain)

        if not only_accuracy:
            # Calculate the number of traces needed
            return test_model(predictions.cpu().numpy(), metadata_attack, sub_key_index,
                              use_hw=use_hw, rank_step=rank_step, unmask=unmask)
        return None, None
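# The HW table used above is the usual byte Hamming-weight lookup; a minimal
# sketch of its assumed contents (the real definition is not shown here):
HW = [bin(value).count("1") for value in range(256)]  # maps byte values 0..255 to classes 0..8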
        #       (epoch + 1, i + 1, running_loss / 2000))
        # running_loss = 0.0
    print("Epoch {}, loss {}".format(epoch, running_loss / total_batches))


def accuracy(predictions, y_test):
    _, pred = predictions.max(1)
    # print(pred)
    # for i in range(20):
    #     print('i: {} Prediction: {} Real: {}, C: {}'.format(
    #         i, pred[i], y_test[i], 'JA' if pred[i] == y_test[i] else ''))
    z = pred == torch.from_numpy(y_test).to(device)
    num_correct = z.sum().item()
    print('Correct: {}'.format(num_correct))
    # Use the actual test-set size rather than a hardcoded 10000
    print('Accuracy: {}'.format(num_correct / len(y_test)))


with torch.no_grad():
    data = torch.from_numpy(x_attack.astype(np.float32)).to(device)
    print(data.cpu().size())
    predictions = F.softmax(net(data).to(device), dim=-1).to(device)
    d = predictions[0].cpu().numpy()
    print(np.sum(d))  # sanity check: a softmax row should sum to ~1
    accuracy(predictions, y_attack)

test_model(predictions.cpu().numpy(), metadata_attack, 2, use_hw=use_hw)
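# test_model is referenced above but not shown. For context, a hedged sketch of
# the standard key-ranking loop such side-channel evaluations perform: per-trace
# class probabilities become log-likelihoods per key-byte guess through the AES
# S-box, and the rank of the true key byte is tracked as traces accumulate.
# The sbox/hw tables and the 'plaintext' metadata field are assumptions here,
# following the ASCAD conventions.
def key_rank_over_traces(predictions, metadata, sub_key_index, real_key,
                         sbox, use_hw=False, hw=None):
    scores = np.zeros(256)
    ranks = []
    for i, probs in enumerate(predictions):
        plaintext_byte = metadata[i]['plaintext'][sub_key_index]
        for guess in range(256):
            label = sbox[plaintext_byte ^ guess]
            if use_hw:
                label = hw[label]
            scores[guess] += np.log(probs[label] + 1e-36)  # guard against log(0)
        # Rank 0 means the true key byte is the best-scoring guess.
        ranks.append(int(np.sum(scores >= scores[real_key])) - 1)
    return ranks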
# # write wav file
# dataWrite = xrec.astype(np.int16)  # extremely important!
# wavfile.write('./predict777' + str(testdB) + '.wav', freq, dataWrite)

# """
# just some other tests
# """
# test_model(model, '.\\test\\01.wav', '.\\test\\output_01.wav',
#            neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)
# test_model(model, '.\\test\\02.wav', '.\\test\\output_02.wav',
#            neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)
# test_model(model, '.\\test\\03.wav', '.\\test\\output_03.wav',
#            neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)
# # test_model(model, '.\\test\\5db\\04.wav', '.\\test\\5db\\output_norm0_04.wav',
# #            neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)
# # test_model(model, '.\\test\\5db\\05.wav', '.\\test\\5db\\output_norm0_05.wav',
# #            neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)

"""
used for statistics
"""
noisy_test_samples = os.listdir('G:\\trainData\\' + str(testdB) + 'db\\test\\noisy')
for each in noisy_test_samples:
    print('processing file', each, '...')
    test_model(model,
               'G:\\trainData\\' + str(testdB) + 'db\\test\\noisy\\' + each,
               'G:\\trainData\\' + str(testdB) + 'db\\test\\output\\trainN\\7200\\' + each,
               neighbor=neighbor, nffts=nffts, normal_flag=normal_flag)
    print('file', each, 'processed!')
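# The commented-out block at the top hints at the wav I/O convention test_model
# relies on. A minimal sketch with scipy.io.wavfile, using hypothetical paths
# and the noisy signal as a stand-in for the model's reconstruction:
import numpy as np
from scipy.io import wavfile

in_path = './test/01.wav'          # hypothetical
out_path = './test/output_01.wav'  # hypothetical

freq, noisy = wavfile.read(in_path)
xrec = noisy.astype(np.float32)    # placeholder for the reconstructed signal
# The int16 cast is the "extremely important" step: float arrays are written as
# float WAVs that expect samples in [-1.0, 1.0], so raw-scale floats come out wrong.
wavfile.write(out_path, freq, xrec.astype(np.int16))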
    # num_traces = 500
    # x_profiling = x_profiling[:num_traces, :]
    # y_profiling = y_profiling[:num_traces, :]

    # Add a channel dimension when the first layer expects 3D input (e.g. Conv1D)
    if len(model.get_layer(index=0).input_shape) == 3:
        x_profiling = x_profiling.reshape((x_profiling.shape[0], x_profiling.shape[1], 1))

    model.fit(x_profiling, y_profiling, epochs=epochs, batch_size=batch_size, callbacks=callbacks)
    return model


if __name__ == "__main__":
    for sub_key_index in range(2, 3):
        model_n = "_model_subkey_{}".format(sub_key_index)
        # file = '/Data/TU/thesis/src/data/ASCAD_data/ASCAD_databases/subkeys/ASCAD_subkey_{}'.format(sub_key_index)
        file = '/media/rico/Data/TU/thesis/data/ASCAD.h5'
        use_hw = True
        # The Hamming weight of a byte takes 9 values (0..8), not 8
        n_classes = 9 if use_hw else 256

        model = get_model(model_n, file, epochs=80, batch_size=100, new=False)
        (_, _), (x_test, y_test), (metadata_profiling, metadata_attack) = \
            load_ascad(file, load_metadata=True)
        x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
        predi = model.predict(x_test)
        x, y = test_model(predi, metadata_attack, sub_key_index)

        plt.plot(x, y)
        plt.grid(True)
        plt.show()
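# load_ascad is imported from elsewhere; a sketch of a loader matching the
# layout of the public ASCAD HDF5 databases (Profiling_traces and Attack_traces
# groups, each holding traces, labels and metadata). Exact dtypes are assumptions.
import h5py
import numpy as np

def load_ascad(ascad_database_file, load_metadata=False):
    with h5py.File(ascad_database_file, 'r') as in_file:
        x_profiling = np.array(in_file['Profiling_traces/traces'], dtype=np.float32)
        y_profiling = np.array(in_file['Profiling_traces/labels'])
        x_attack = np.array(in_file['Attack_traces/traces'], dtype=np.float32)
        y_attack = np.array(in_file['Attack_traces/labels'])
        if not load_metadata:
            return (x_profiling, y_profiling), (x_attack, y_attack)
        metadata_profiling = np.array(in_file['Profiling_traces/metadata'])
        metadata_attack = np.array(in_file['Attack_traces/metadata'])
    return (x_profiling, y_profiling), (x_attack, y_attack), \
           (metadata_profiling, metadata_attack)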
print("\n", "--" * 20) output = model(images) # b, t, a print("output: ") pred = output.argmax(-1).cpu().numpy() # b,t print(label_map.decode(pred, raw=False)) print("label: ") print(label_map.decode_label(labels, label_lens)) print("--" * 20) model.train() optimizer.zero_grad() output = model(images) probs = output.transpose(0, 1).contiguous().cuda() label_size = label_lens probs_size = torch.IntTensor([probs.size(0)] * probs.size(1)) probs.requires_grad_(True) loss = ctc_loss(probs, labels, probs_size, label_size) loss.backward() optimizer.step() total_loss += loss.item() if step % print_every == 0: print("step: %d, loss: %.5f" % (step, total_loss / print_every)) total_loss = 0 if step % save_state_every == 0: save_state(ckpt_dir, step, model, optimizer) accuracy = test_model(test_dataloader, label_map, model) step += 1
import torch
from torch.utils.data import DataLoader
from torchvision import transforms

from input_data import LabelMap, TagsDataset, collate_fn
from model import CTCModel
from util import load_state, test_model

# test_file = "./benchmarks/IIIT5k/IIII5k.tags"
# test_file = "./benchmarks/ic13/ic13_1015.tags"
test_file = "./benchmarks/svt/svt.tags"
ckpt_dir = "./experiments/master"
step = 330000

label_map = LabelMap()
n_classes = label_map.num_classes

test_transform = transforms.Compose([
    transforms.Resize((32, 128)),
    transforms.ToTensor(),
])
test_dataset = TagsDataset(test_file, label_map, test_transform)
test_dataloader = DataLoader(dataset=test_dataset, batch_size=32, shuffle=True,
                             num_workers=4, collate_fn=collate_fn)

network = CTCModel(n_classes)
network = network.cuda()
load_state(ckpt_dir, step, network)
test_model(test_dataloader, label_map, network, print_result=True)
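# load_state comes from util and is not shown; one plausible implementation,
# with the checkpoint file naming being a pure guess:
import os

def load_state(ckpt_dir, step, model):
    ckpt_path = os.path.join(ckpt_dir, 'model_{}.pth'.format(step))  # hypothetical naming
    state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state)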