def get_mobilenet_model(pretrain=True, num_classes=5, requires_grad=True):
    # Return a MobileNet backbone with the fully connected layer removed
    model = MobileNet()
    # Freeze or unfreeze the backbone layers as requested
    for param in model.parameters():
        param.requires_grad = requires_grad
    if pretrain:
        # TODO: loading pre-trained weights for the base net increases accuracy when fine-tuning
        basenet_state = torch.load("/home/pzl/object-localization/pretained/mobienetv2.pth")
        # filter out unnecessary keys
        model_dict = model.state_dict()
        pretrained_dict = {k: v for k, v in basenet_state.items() if k in model_dict}
        # merge the pre-trained weights into the current state dict and load it
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
    return model
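# Hedged usage sketch (not part of the original file): assumes `torch` and `MobileNet`
# are imported earlier in the file and that the checkpoint path above exists.
# The variable name `frozen_backbone` is illustrative only.
if __name__ == "__main__":
    # build a frozen, pre-trained backbone for feature extraction
    frozen_backbone = get_mobilenet_model(pretrain=True, requires_grad=False)
    frozen_backbone.eval()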
if model_name == "res50":
    model = ResNet50()
elif model_name == "res34":
    model = ResNet34()
elif model_name == "vgg11":
    model = Vgg11Net()
else:
    print("Model Wrong")
model.to(device)

# train/test
loss_name = sys.argv[2]
batch = sys.argv[3]
model.load_state_dict(
    torch.load("/home/lxd/checkpoints/{}/{}_{}_VeRI_{}.pt".format(
        date, model_name, loss_name, batch)))
model.eval()
print("model load {}".format(model_name))


#def trajectory_reader():
#    trajectory_path = "/home/lxd/datasets/VeRi/test_track_VeRi.txt"


def get_trajectorys():
    path = "/home/lxd/datasets/VeRi/image_test"
    images = os.listdir(path)
    images.sort()
    id_cams = []
    trajectorys = []
    for image in images:
        if image[:9] not in id_cams:
            id_cams.append(image[:9])
class HyperTrain(Trainable):

    def _get_dataset(self, name):
        normalize = transforms.Normalize(
            mean=[0.4914, 0.4822, 0.4465],
            std=[0.2023, 0.1994, 0.2010],
        )

        if name == 'FashionMNIST':
            data_transforms = transforms.Compose([
                transforms.Grayscale(num_output_channels=3),
                transforms.ToTensor(),
                normalize
            ])
            dataset = torchvision.datasets.FashionMNIST(
                root="/home/kn15263s/data/FashionMNIST",
                transform=data_transforms)
            num_classes = 10
            input_size = 512 * 1 * 1
            return dataset, num_classes, input_size

        elif name == 'KMNIST':
            data_transforms = transforms.Compose([
                transforms.Grayscale(num_output_channels=3),
                transforms.ToTensor(),
                normalize
            ])
            dataset = torchvision.datasets.KMNIST(
                root="/home/kn15263s/data/KMNIST",
                transform=data_transforms,
                download=True)
            num_classes = 10
            input_size = 512 * 1 * 1
            return dataset, num_classes, input_size

        elif name == 'CIFAR10':
            data_transforms = transforms.Compose(
                [transforms.ToTensor(), normalize])
            dataset = torchvision.datasets.CIFAR10(
                root="/home/kn15263s/data/CIFAR10/",
                transform=data_transforms)
            num_classes = 10
            input_size = 512 * 1 * 1
            return dataset, num_classes, input_size

        elif name == 'SVHN':
            data_transforms = transforms.Compose(
                [transforms.ToTensor(), normalize])
            dataset = torchvision.datasets.SVHN(
                root="/home/kn15263s/data/SVHN/",
                transform=data_transforms)
            num_classes = 10
            input_size = 512 * 1 * 1
            return dataset, num_classes, input_size

        elif name == 'STL10':
            data_transforms = transforms.Compose(
                [transforms.ToTensor(), normalize])
            dataset = torchvision.datasets.STL10(
                root="/home/kn15263s/data/STL10/",
                transform=data_transforms)
            num_classes = 10
            input_size = 512 * 3 * 3
            return dataset, num_classes, input_size

        # elif name == 'Food':
        #
        #     class Food(Dataset):
        #
        #         def __init__(self, files, class_names, transform=transforms.ToTensor()):
        #             self.data = files
        #             self.transform = transform
        #             self.class_names = class_names
        #
        #         def __getitem__(self, idx):
        #             img = Image.open(self.data[idx]).convert('RGB')
        #             name = self.data[idx].split('/')[-2]
        #             y = self.class_names.index(name)
        #             img = self.transform(img)
        #             return img, y
        #
        #         def __len__(self):
        #             return len(self.data)
        #
        #     data_transforms = transforms.Compose([
        #         transforms.RandomHorizontalFlip(),
        #         transforms.RandomVerticalFlip(),
        #         transforms.Resize((224, 224)),
        #         transforms.ToTensor(),
        #         normalize])
        #
        #     path = '/home/willy-huang/workspace/data/food'
        #     files_training = glob(os.path.join(path, '*/*.jpg'))
        #     class_names = []
        #
        #     for folder in os.listdir(os.path.join(path)):
        #         class_names.append(folder)
        #
        #     num_classes = len(class_names)
        #     dataset = Food(files_training, class_names, data_transforms)
        #     input_size = 512 * 7 * 7
        #
        #     return dataset, num_classes, input_size
        #
        # elif name == 'Stanford_dogs':
        #
        #     class Stanford_dogs(Dataset):
        #
        #         def __init__(self, files, class_names, transform=transforms.ToTensor()):
        #             self.data = files
        #             self.transform = transform
        #             self.class_names = class_names
        #
        #         def __getitem__(self, idx):
        #             img = Image.open(self.data[idx]).convert('RGB')
        #             name = self.data[idx].split('/')[-2]
        #             y = self.class_names.index(name)
        #             img = self.transform(img)
        #             return img, y
        #
        #         def __len__(self):
        #             return len(self.data)
        #
        #     data_transforms = transforms.Compose([
        #         transforms.RandomHorizontalFlip(),
        #         transforms.RandomVerticalFlip(),
        #         transforms.Resize((224, 224)),
        #         transforms.ToTensor(),
        #         normalize])
        #
        #     path = '/home/willy-huang/workspace/data/stanford_dogs'
        #     files_training = glob(os.path.join(path, '*/*.jpg'))
        #     class_names = []
        #
        #     for folder in os.listdir(os.path.join(path)):
        #         class_names.append(folder)
        #
        #     num_classes = len(class_names)
        #     dataset = Stanford_dogs(files_training, class_names, data_transforms)
        #     input_size = 512 * 7 * 7
        #
        #     return dataset, num_classes, input_size

    def _setup(self, config):
        random.seed(50)
        np.random.seed(50)
        torch.cuda.manual_seed_all(50)
        torch.manual_seed(50)

        self.total_time = time.time()
        self.name = args.Dataset_name
        nnArchitecture = args.Network_name

        dataset, num_class, input_size = self._get_dataset(self.name)

        # 80/20 train/validation split over a shuffled index permutation
        num_total = len(dataset)
        shuffle = np.random.permutation(num_total)
        split_val = int(num_total * 0.2)
        train_idx, valid_idx = shuffle[split_val:], shuffle[:split_val]
        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)

        self.trainset_ld = DataLoader(dataset, batch_size=256,
                                      sampler=train_sampler, num_workers=4)
        self.validset_ld = DataLoader(dataset, batch_size=256,
                                      sampler=valid_sampler, num_workers=4)

        self.modelname = '{}--{}.pth.tar'.format(self.name, nnArchitecture)
        loggername = self.modelname.replace("pth.tar", "log")
        self.logger = utils.buildLogger(loggername)

        self.seed_table = np.array([
            "", "epoch", "lr", "momentum", "weight_decay", "factor",
            "outLoss", "accuracy"
        ])

        # ---- hyperparameters ----
        self.lr = config["lr"]
        self.momentum = config["momentum"]
        self.weight_decay = config["weight_decay"]
        self.factor = config["factor"]

        self.epochID = 0
        self.loss = nn.CrossEntropyLoss()
        self.accuracy = -999999999999.0

        # -------------------- SETTINGS: NETWORK ARCHITECTURE --------------------
        if nnArchitecture == 'Vgg11':
            self.model = Vgg11(num_class, input_size).cuda()
        elif nnArchitecture == 'Resnet18':
            self.model = Resnet18(num_class, input_size).cuda()
        elif nnArchitecture == 'MobileNet':
            self.model = MobileNet(num_class, input_size).cuda()
        elif nnArchitecture == 'MobileNet_V2':
            self.model = MobileNet_V2(num_class, input_size).cuda()
        else:
            self.model = None
            assert 0

        self.model = torch.nn.DataParallel(self.model).cuda()
        self.logger.info("Build Model Done")

        # -------------------- SETTINGS: OPTIMIZER & SCHEDULER --------------------
        self.optimizer = optim.SGD(
            filter(lambda x: x.requires_grad, self.model.parameters()),
            lr=self.lr, momentum=self.momentum,
            weight_decay=self.weight_decay, nesterov=False)
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, factor=self.factor, patience=10, mode='min')
        self.logger.info("Build Optimizer Done")

    def _train_iteration(self):
        self.start_time = time.time()
        self.model.train()
        losstra = 0
        losstraNorm = 0

        for batchID, (input, target) in enumerate(self.trainset_ld):
            varInput = Variable(input).cuda()
            varTarget = Variable(target).cuda()
            varOutput = self.model(varInput)
            lossvalue = self.loss(varOutput, varTarget)

            losstra += lossvalue.item()
            losstraNorm += 1

            self.optimizer.zero_grad()
            lossvalue.backward()
            torch.nn.utils.clip_grad_value_(self.model.parameters(), 10)
            self.optimizer.step()

        self.trainLoss = losstra / losstraNorm

    def _test(self):
        self.model.eval()
        lossVal = 0
        lossValNorm = 0
        correct = 0
        num_samples = 0

        for batchID, (input, target) in enumerate(self.validset_ld):
            with torch.no_grad():
                # non_blocking replaces the old `async` keyword argument,
                # which is a syntax error on Python 3.7+
                varInput = Variable(input).cuda(non_blocking=True)
                varTarget = Variable(target).cuda(non_blocking=True)
                varOutput = self.model(varInput)
                losstensor = self.loss(varOutput, varTarget)

                pred = varOutput.argmax(1)
                correct += (pred == varTarget).sum().cpu()

                lossVal += losstensor.item()
                lossValNorm += 1
                num_samples += len(input)

        self.outLoss = lossVal / lossValNorm
        accuracy = correct.item() / num_samples

        self.scheduler.step(self.outLoss, epoch=self.epochID)

        if accuracy > self.accuracy:
            self.accuracy = accuracy
            torch.save(
                {
                    'epoch': self.epochID + 1,
                    'state_dict': self.model.state_dict(),
                    'loss': self.outLoss,
                    'best_accuracy': self.accuracy,
                    'optimizer': self.optimizer.state_dict(),
                }, "./best_" + self.modelname)

            save = np.array([
                self.seed_table,
                [
                    str(self.name),
                    str(self.epochID + 1),
                    str(self.lr),
                    str(self.momentum),
                    str(self.weight_decay),
                    str(self.factor),
                    str(self.outLoss),
                    str(self.accuracy)
                ]
            ])
            np.savetxt("./seed(50).csv", save, delimiter=',', fmt="%s")

        self.logger.info('Epoch [' + str(self.epochID + 1) +
                         '] loss= {:.5f}'.format(self.outLoss) +
                         ' ---- accuracy= {:.5f}'.format(accuracy) +
                         ' ---- best_accuracy= {:.5f}'.format(self.accuracy) +
                         ' ---- model: {}'.format(self.modelname) +
                         ' ---- time: {:.1f} s'.format(time.time() - self.start_time) +
                         ' ---- total_time: {:.1f} s'.format(time.time() - self.total_time))

        self.epochID += 1

        return {
            "episode_reward_mean": accuracy,
            "neg_mean_loss": self.outLoss,
            "mean_accuracy": accuracy,
            "epoch": self.epochID,
            "mean_train_loss": self.trainLoss
        }

    def _train(self):
        self._train_iteration()
        return self._test()

    def _save(self, checkpoint_dir):
        checkpoint_path = os.path.join(checkpoint_dir, "final_model.pth")
        torch.save(
            {
                "epoch": self.epochID,
                "best_accuracy": self.accuracy,
                "loss": self.outLoss,
                "state_dict": self.model.state_dict(),
                "optimizer": self.optimizer.state_dict(),
            }, checkpoint_path)
        return checkpoint_path

    def _restore(self, checkpoint_path):
        # load the checkpoint from disk before restoring the model weights
        checkpoint = torch.load(checkpoint_path)
        self.model.load_state_dict(checkpoint["state_dict"])
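# Hedged launch sketch (not in the original file): assumes the legacy class-based
# Ray Tune Trainable API that matches the _setup/_train/_save/_restore methods above,
# and that `args` provides Dataset_name / Network_name. The grid-search values and
# resource settings below are placeholders, not the authors' configuration.
if __name__ == "__main__":
    import ray
    from ray import tune

    ray.init()
    tune.run(
        HyperTrain,
        config={
            "lr": tune.grid_search([0.1, 0.01, 0.001]),
            "momentum": tune.grid_search([0.5, 0.9]),
            "weight_decay": tune.grid_search([1e-4, 5e-4]),
            "factor": tune.grid_search([0.1, 0.5]),
        },
        resources_per_trial={"cpu": 4, "gpu": 1},
        # _train() reports "epoch" in its result dict, so this stops each trial
        # once 100 epochs have been reported
        stop={"epoch": 100},
    )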
def main():
    # lists to store the losses and accuracy for plotting
    train_all_losses2 = []
    train_all_acc2 = []
    val_all_losses2 = []
    val_all_acc2 = []
    test_all_losses2 = 0.0

    # number of training epochs
    epochs = 100

    # instantiate the network and move it to the GPU
    mobilenet = MobileNet()
    mobilenet.to('cuda')

    # loss function and optimizer
    criterion = nn.BCELoss()
    learning_rate = 1e-3
    optimizer = torch.optim.Adam(mobilenet.parameters(), lr=learning_rate,
                                 betas=(0.9, 0.999))

    # %load_ext memory_profiler  # IPython line magic; only valid in a notebook session

    best_acc = 0.0
    for epoch in range(epochs):
        train(mobilenet, epoch, train_all_losses2, train_all_acc2)
        acc = validation(mobilenet, val_all_losses2, val_all_acc2, best_acc)
        # record the best model
        if acc > best_acc:
            checkpoint_path = './model_checkpoint.pth'
            best_acc = acc
            # save the model and optimizer states
            torch.save({'model_state_dict': mobilenet.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict()},
                       checkpoint_path)
            print('new best model saved')
        print("========================================================================")

    # reload the best checkpoint and evaluate on the test set
    checkpoint_path = './model_checkpoint.pth'
    model = MobileNet().to('cuda')
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    print("model loaded successfully.")
    model.eval()

    attr_acc = []
    test(model, attr_acc=attr_acc)

    # plot per-attribute test accuracy
    plt.figure(figsize=(8, 10))
    plt.barh(range(40), [100 * acc for acc in attr_acc],
             tick_label=attributes, fc='brown')
    plt.show()

    # plot training and validation loss
    plt.figure(figsize=(8, 6))
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.title('Loss')
    plt.grid(True, linestyle='-.')
    plt.plot(train_all_losses2, c='salmon', label='Training Loss')
    plt.plot(val_all_losses2, c='brown', label='Validation Loss')
    plt.legend(fontsize='12', loc='upper right')
    plt.show()

    # plot training and validation accuracy
    plt.figure(figsize=(8, 6))
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.title('Accuracy')
    plt.grid(True, linestyle='-.')
    plt.plot(train_all_acc2, c='salmon', label='Training Accuracy')
    plt.plot(val_all_acc2, c='brown', label='Validation Accuracy')
    plt.legend(fontsize='12', loc='lower right')
    plt.show()
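# Entry-point guard (not in the original snippet): assumes the train/validation/test
# helpers and the `attributes` label list used inside main() are defined elsewhere
# in this file or imported above.
if __name__ == '__main__':
    main()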