        img = img.convert('RGB')
        return self.transform(img)

    def __getitem__(self, idx):
        if idx >= len(self.classes):
            raise ValueError('idx should satisfy 0 <= idx < len(self.classes)')
        # Draw two distinct images from the class: one anchor, one positive.
        curr_class_samples = self.samples[idx]
        inds_in_class = random.sample(range(len(curr_class_samples)), 2)
        anchor = self.get_image(curr_class_samples[inds_in_class[0]])
        positive = self.get_image(curr_class_samples[inds_in_class[1]])
        label = idx
        return anchor, positive, label

    def __len__(self):
        return len(self.classes)


if __name__ == '__main__':
    net = SqueezeNet()
    loss_func = NpairLoss()
    p = PairsDataSet(os.path.join('data', 'CUB_200_2011', 'images', 'val'),
                     get_val_transforms(input_size=224))
    loader = DataLoader(p, batch_size=210, shuffle=True)
    for anchor, positive, label in loader:
        embed0 = net.embed(anchor)
        embed1 = net.embed(positive)
        loss = loss_func(embed0, embed1, label)
        print(loss)
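
# For reference, a minimal sketch of the N-pair objective the demo above
# exercises, assuming NpairLoss follows Sohn (2016) with one (anchor,
# positive) pair per class in the batch. This is an illustrative
# re-implementation, not the project's NpairLoss.
import torch
import torch.nn.functional as F

def npair_loss_sketch(anchors, positives):
    # Score every anchor against every positive; the diagonal entries are
    # the matching pairs, so the problem reduces to N-way classification.
    logits = anchors @ positives.t()
    targets = torch.arange(anchors.size(0), device=anchors.device)
    return F.cross_entropy(logits, targets)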
input_size = 224
train_trans_list = get_train_transforms(input_size=input_size)
train_dataset = datasets.ImageFolder(traindir, train_trans_list)
classes = [sample_tuple[1] for sample_tuple in train_dataset.samples]
# Presumably: 5 classes per episode, 40 samples per class, 100 episodes per epoch.
sampler = PrototypicalBatchSampler(classes, 5, 40, 100)
train_loader = torch.utils.data.DataLoader(
    train_dataset, batch_sampler=sampler, num_workers=n_worker)  # , pin_memory=True)
loss_func = PrototypicalLoss(n_support=5)

# net = MobileNetV2(n_class=train_classes)
net = SqueezeNet(num_classes=train_classes)
# The ImageNet checkpoint ships a 1000-way classifier; copy the freshly
# initialised head weights over it so the new classification head stays random.
random_state_dict = net.state_dict()
# state_dict = torch.load(os.path.join('weights', 'mobilenet_v2.pth.tar'),
#                         map_location=lambda storage, loc: storage)
state_dict = torch.load(os.path.join('weights', 'squeezenet1_0-a815701f.pth'),
                        map_location=lambda storage, loc: storage)
state_dict['classifier.1.bias'] = random_state_dict['classifier.1.bias']
state_dict['classifier.1.weight'] = random_state_dict['classifier.1.weight']
net.load_state_dict(state_dict)
        return self.iterations


if __name__ == '__main__':
    input_size = 224
    val_dir = r'C:\dev\studies\deepLearning\fine-grained-few-shot-calssification\data\CUB_200_2011\images\val'
    val_trans_list = get_val_transforms(input_size=input_size)
    val_dataset = datasets.ImageFolder(val_dir, val_trans_list)
    classes = [sample_tuple[1] for sample_tuple in val_dataset.samples]
    sampler = PrototypicalBatchSampler(classes, 5, 10, 100)

    # net = MobileNetV2()
    # state_dict = torch.load(os.path.join('weights', 'mobilenet_v2.pth'),
    #                         map_location=lambda storage, loc: storage)
    net = SqueezeNet(num_classes=160)
    # weights_path = os.path.join('weights', 'squeezenet1_0-a815701f.pth')
    # weights_path = r"C:\temp\weights\classification.pth"
    weights_path = r"C:\temp\weights\squeezenet_triplet_hard.pth"
    state_dict = torch.load(weights_path, map_location=lambda storage, loc: storage)
    net.load_state_dict(state_dict)
    net.eval()

    val_loader = torch.utils.data.DataLoader(
        val_dataset, num_workers=1, batch_sampler=sampler)  # , pin_memory=True)
    loss_func = PrototypicalLoss(n_support=1)
    sum_acc = 0
    num_acc = 0
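    # The evaluation loop that presumably follows accumulates per-episode
    # accuracy; a sketch of the continuation, mirroring the other eval
    # scripts in this project (the original file's loop is not shown here):
    for x, y in val_loader:
        _, acc = loss_func(net.embed(x), y)
        sum_acc += acc.item()
        num_acc += 1
    print('mean acc: ' + str(sum_acc / num_acc))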
def plot_tsne_embeddings(val_dir, weights_path):
    input_size = 224
    train_classes = 160
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = SqueezeNet(num_classes=train_classes)
    net.to(device)
    if weights_path != 'random':
        # Keep the randomly initialised classifier head; only the backbone
        # weights from the checkpoint matter for embedding.
        random_state_dict = net.state_dict()
        state_dict = torch.load(weights_path, map_location=lambda storage, loc: storage)
        state_dict['classifier.1.bias'] = random_state_dict['classifier.1.bias']
        state_dict['classifier.1.weight'] = random_state_dict['classifier.1.weight']
        net.load_state_dict(state_dict)
    else:
        weights_path = os.path.join('weights', 'random')
    net.eval()  # match the evaluation scripts so embeddings are deterministic

    val_trans_list = get_val_transforms(input_size=input_size)
    val_dataset = datasets.ImageFolder(val_dir, val_trans_list)
    val_loader = torch.utils.data.DataLoader(
        val_dataset, num_workers=1, batch_size=10, shuffle=True)

    all_embeds = None
    all_labels = None
    for i, (batch, labels) in enumerate(val_loader):
        current_embeds = net.embed(batch.to(device)).detach().cpu().numpy()
        if i == 0:
            all_embeds = current_embeds
            all_labels = labels.numpy()
        else:
            all_embeds = np.concatenate((all_embeds, current_embeds), 0)
            all_labels = np.concatenate((all_labels, labels.numpy()), 0)

    tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
    tsne_results = tsne.fit_transform(all_embeds)

    # Encode each label as a (color, marker) pair so more than seven
    # classes remain visually distinguishable.
    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(1, 1, 1, title='tsne')
    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
    markers = [".", ",", "o", "v", "^", "<", ">"]
    assert len(markers) == len(colors)
    div_value = len(markers)
    for sample_num in range(tsne_results.shape[0]):
        ax.scatter(
            x=tsne_results[sample_num, 0],
            y=tsne_results[sample_num, 1],
            c=colors[all_labels[sample_num] % div_value],
            marker=markers[all_labels[sample_num] // div_value],
            alpha=0.9)
    plt.savefig(os.path.splitext(weights_path)[0] + '.png')
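
# A possible driver for the function above (the paths mirror those used in
# the training and evaluation scripts; adjust as needed, or pass 'random'
# to visualise an untrained embedding):
if __name__ == '__main__':
    plot_tsne_embeddings(
        os.path.join('data', 'CUB_200_2011', 'images', 'val'),
        os.path.join('weights', 'squeezenet_npairs_best.pth'))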
os.path.join("weights", "squeezenet_npairs_best.pth")) nk_best = current_nk torch.save(net.state_dict(), os.path.join("weights", "squeezenet_npairs_last.pth")) loss_sum = 0 num_samples = 0 return device, epochs, net if __name__ == '__main__': train_classes = 160 loss_func = NpairLoss() # net = MobileNetV2(n_class=train_classes) net = SqueezeNet(num_classes=train_classes, should_normalize=False) random_state_dict = net.state_dict() # state_dict = torch.load(os.path.join('weights', 'mobilenet_v2.pth.tar'), map_location=lambda storage, loc: storage) state_dict = torch.load(os.path.join('weights', 'squeezenet1_0-a815701f.pth'), map_location=lambda storage, loc: storage) state_dict['classifier.1.bias'] = random_state_dict['classifier.1.bias'] state_dict['classifier.1.weight'] = random_state_dict[ 'classifier.1.weight'] net.load_state_dict(state_dict) # traindir = os.path.join('data', 'CUB_200_2011_reorganized', 'CUB_200_2011', 'images', 'train') # valdir = os.path.join('data', 'CUB_200_2011_reorganized', 'CUB_200_2011', 'images', 'val')
    acc_list = []
    counter = 0
    with torch.no_grad():  # evaluation only; avoids accumulating autograd state
        for x, y in val_loader:
            emb = net.embed(x.to(device))
            loss, acc = loss_func(emb, y.to(device))
            acc_list.append(acc)
            counter += 1
            if counter % 100 == 0:
                print(counter)
    accs = torch.stack(acc_list)
    return torch.mean(accs), torch.std(accs)


if __name__ == '__main__':
    root_dir = os.path.join('data', 'CUB_200_2011', 'images', 'test')
    # weight_path = 'random'
    # weight_path = os.path.join('weights', 'squeezenet_class.pth')
    weight_path = sys.argv[1]

    net = SqueezeNet()
    # Replace the checkpoint's classifier weights with fresh ones; only the
    # embedding is evaluated here.
    random_state_dict = net.state_dict()
    state_dict = torch.load(weight_path, map_location=lambda storage, loc: storage)
    state_dict['classifier.1.bias'] = random_state_dict['classifier.1.bias']
    state_dict['classifier.1.weight'] = random_state_dict['classifier.1.weight']
    net.load_state_dict(state_dict)
    net.eval()

    mean, std = proto_n_way_k_shot(root_dir, 5, 5, net)
    print('mean: ' + str(mean))
    print('std: ' + str(std))
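
# For context, a minimal sketch of what a prototypical episode computes,
# assuming PrototypicalLoss follows Snell et al. (2017): each class
# prototype is the mean of its support embeddings, and queries are
# classified by the nearest prototype in Euclidean distance. This is
# illustrative only, not the project's PrototypicalLoss.
import torch

def proto_accuracy_sketch(support, query):
    # support: (n_way, k_shot, d); query: (n_way, n_query, d)
    n_way, n_query, d = query.shape
    prototypes = support.mean(dim=1)                     # (n_way, d)
    dists = torch.cdist(query.reshape(-1, d), prototypes)
    preds = dists.argmin(dim=1)
    targets = torch.arange(n_way).repeat_interleave(n_query)
    return (preds == targets).float().mean()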
        return len(self.list)

    def __getitem__(self, idx):
        with open(self.list[idx], 'rb') as f:
            img = Image.open(f)
            img = img.convert('RGB')
        return self.transform(img)


if __name__ == '__main__':
    # root_dir = r"C:\temp\tempfordeep"
    root_dir = r'C:\dev\studies\deepLearning\fine-grained-few-shot-calssification\data\CUB_200_2011\images\val'
    # root_dir = r'C:\temp\tempfordeep4'

    # for seed in range(3):
    #     print(seed)
    #     split_check(root_dir, 2, 1, seed)

    # net = MobileNetV2()
    # state_dict = torch.load(os.path.join('weights', 'mobilenet_v2.pth'),
    #                         map_location=lambda storage, loc: storage)
    net = SqueezeNet()
    state_dict = torch.load(os.path.join('weights', 'squeezenet1_0-a815701f.pth'),
                            map_location=lambda storage, loc: storage)
    net.load_state_dict(state_dict)
    net.eval()

    # Evaluate over three differently seeded episode splits.
    for seed in range(3):
        acc = n_way_k_shot(root_dir, 5, 5, net, 1000 + seed)
        print("acc is: " + str(acc))