import os

import numpy as np
import torch
from PIL import Image
import albumentations
import albumentations.pytorch as AT

# RESIZE_H, RESIZE_W, EmbeddingNet and bbox_df are assumed to be defined in earlier cells

data_transforms_test = albumentations.Compose([
    albumentations.Resize(RESIZE_H, RESIZE_W),
    albumentations.Normalize(),
    AT.ToTensor()
])

# load the best model and initialize EmbeddingNet with its embedding weights
siamese_net = torch.load("best_net.pth")
state_dict = siamese_net.state_dict()
embed_net = EmbeddingNet()
temp_dict = {}
for key in state_dict.keys():
    if key.startswith("embedding_net"):
        # strip the "embedding_net." prefix (14 characters) from each key
        temp_dict[key[14:]] = state_dict[key]
embed_net.load_state_dict(temp_dict)

# function to generate an embedding for a single image file
def getEmbedding(file_path, x):
    file_name = os.path.join(file_path, x)
    # look up the bounding box for this image and crop it before embedding
    bbox = bbox_df.loc[bbox_df.Image == x, :].values[0, 1:]
    img_pil = Image.open(file_name).crop(bbox).convert('RGB')
    img = np.array(img_pil)
    image = data_transforms_test(image=img)['image'].unsqueeze(0)
    vector = embed_net(image)
    return vector

# test
embed_net.eval()
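# Illustrative usage sketch (not from the original notebook): compare two cropped
# images by the Euclidean distance between their embeddings. probe_dir and the
# two file names below are placeholders, not real paths from this project.
probe_dir = "/path/to/test_images"
with torch.no_grad():
    emb_a = getEmbedding(probe_dir, "img_a.jpg")
    emb_b = getEmbedding(probe_dir, "img_b.jpg")
    # a smaller L2 distance means the two images are more likely the same identity
    print("L2 distance:", torch.dist(emb_a, emb_b, p=2).item())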
    labels = np.zeros(len(dataloader.dataset))
    k = 0
    for images, target in dataloader:
        if cuda:
            images = images.cuda()
        # embed the batch and store embeddings and labels at the current offset
        embeddings[k:k + len(images)] = model.get_embedding(images).data.cpu().numpy()
        labels[k:k + len(images)] = target.numpy()
        k += len(images)
    return embeddings, labels


# In[4]:

# load the trained EmbeddingNet weights and switch to evaluation mode
model = EmbeddingNet()
model.load_state_dict(torch.load('./saved_model/titi'))
model.eval()


# In[5]:

# run the final evaluation over the probe and gallery folders
final_test_epoch('/Users/ayush/projects/my_pytorch/probe',
                 '/Users/ayush/projects/my_pytorch/gallery',
                 '/Users/ayush/projects/my_pytorch/fp_output_txt',
                 model,
                 metrics=[AverageNonzeroTripletsMetric()],
                 transform=transforms.Compose([transforms.ToTensor()]))


# In[7]:

# remove the macOS .DS_Store file so it is not picked up as an image
get_ipython().system('rm /Users/ayush/projects/my_pytorch/probe/.DS_Store')
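# Illustrative sketch (not part of the original notebook): once probe and gallery
# embeddings have been extracted with the loop above, each probe can be matched to
# its nearest gallery embedding by L2 distance. The function name and the three
# array arguments are assumptions, not names used elsewhere in this project.
def nearest_gallery_match(probe_embeddings, gallery_embeddings, gallery_labels):
    # pairwise squared L2 distances between every probe and every gallery embedding
    dists = ((probe_embeddings[:, None, :] - gallery_embeddings[None, :, :]) ** 2).sum(-1)
    # index of the closest gallery item for each probe
    nearest = dists.argmin(axis=1)
    return gallery_labels[nearest]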