os.makedirs(args.outdir)
args.outdir_models = os.path.join(args.outdir, 'models', args.dataset_name)
if not os.path.isdir(args.outdir_models):
    os.makedirs(args.outdir_models)
args.logdir = os.path.join(args.outdir_models, '.log')
if not os.path.isdir(args.logdir):
    os.makedirs(args.logdir)

# write config file:
args.config_dir = os.path.join(args.outdir_models, '.configs')
if not os.path.isdir(args.config_dir):
    os.makedirs(args.config_dir)
write_config(args, exp_name=os.path.join(args.config_dir, '{}'.format(args.dataset_name)))
if args.verbose:
    print('Saved {}'.format(os.path.join(args.config_dir, '{}.txt'.format(args.dataset_name))))


if __name__ == '__main__':
    use_gpu = torch.cuda.is_available()
    init = Initializer(args.dataset_name, impath=None, info_file=None, verbose=True, feature_dim=args.feature_dim)
    # initialize files needed for training, like precomputed features
    init.initialize(dataset=False,
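# --- Illustrative sketch, not part of the original script -----------------------------------
# write_config() is used above and below but defined elsewhere in the repository. Based on its
# call sites (an argparse.Namespace or a dict of arguments, an experiment name/path, optional
# 'extras', and the 'Saved <...>.txt' message above), it presumably dumps the run configuration
# to a text file, roughly like this hypothetical helper:
def _write_config_sketch(args, exp_name, extras=None):
    config = dict(args) if isinstance(args, dict) else vars(args).copy()
    if extras is not None:
        config.update(extras)
    with open('{}.txt'.format(exp_name), 'w') as f:
        for key in sorted(config.keys()):
            f.write('{}: {}\n'.format(key, config[key]))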
def train_multiclass(train_file, test_file, stat_file,
                     model='mobilenet_v2',
                     classes=('artist_name', 'genre', 'style', 'technique', 'century'),
                     label_file='_user_labels.pkl',
                     im_path='/export/home/kschwarz/Documents/Data/Wikiart_artist49_images',
                     chkpt=None, weight_file=None,
                     triplet_selector='semihard', margin=0.2,
                     labels_per_class=4, samples_per_label=4,
                     use_gpu=True, device=0,
                     epochs=100, batch_size=32, lr=1e-4, momentum=0.9,
                     log_interval=10, log_dir='runs', exp_name=None, seed=123):
    argvars = locals().copy()
    torch.manual_seed(seed)

    # LOAD DATASET
    with open(stat_file, 'r') as f:
        data = pickle.load(f)
        mean, std = data['mean'], data['std']
        mean = [float(m) for m in mean]
        std = [float(s) for s in std]
    normalize = transforms.Normalize(mean=mean, std=std)
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(90),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    if model.lower() == 'inception_v3':         # change input size to 299
        train_transform.transforms[0].size = (299, 299)
        val_transform.transforms[0].size = (299, 299)

    trainset = create_trainset(train_file, label_file, im_path, train_transform, classes)
    for c in classes:
        if len(trainset.labels_to_ints[c]) < labels_per_class:
            print('fewer labels in class {} than labels_per_class, use all available labels ({})'
                  .format(c, len(trainset.labels_to_ints[c])))
    valset = create_valset(test_file, im_path, val_transform, trainset.labels_to_ints)

    # PARAMETERS
    use_cuda = use_gpu and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(device)
        torch.cuda.manual_seed_all(seed)

    if model.lower() not in ['squeezenet', 'mobilenet_v1', 'mobilenet_v2', 'vgg16_bn', 'inception_v3', 'alexnet']:
        assert False, 'Unknown model {}\n\t+ Choose from: ' \
                      '[squeezenet, mobilenet_v1, mobilenet_v2, vgg16_bn, inception_v3, alexnet].'.format(model)
    elif model.lower() == 'mobilenet_v1':
        bodynet = mobilenet_v1(pretrained=weight_file is None)
    elif model.lower() == 'mobilenet_v2':
        bodynet = mobilenet_v2(pretrained=weight_file is None)
    elif model.lower() == 'vgg16_bn':
        bodynet = vgg16_bn(pretrained=weight_file is None)
    elif model.lower() == 'inception_v3':
        bodynet = inception_v3(pretrained=weight_file is None)
    elif model.lower() == 'alexnet':
        bodynet = alexnet(pretrained=weight_file is None)
    else:       # squeezenet
        bodynet = squeezenet(pretrained=weight_file is None)

    # Load weights for the body network
    if weight_file is not None:
        print("=> loading weights from '{}'".format(weight_file))
        pretrained_dict = torch.load(weight_file, map_location=lambda storage, loc: storage)['state_dict']
        state_dict = bodynet.state_dict()
        pretrained_dict = {k.replace('bodynet.', ''): v for k, v in pretrained_dict.items()     # in case of multilabel weight file
                           if (k.replace('bodynet.', '') in state_dict.keys()
                               and v.shape == state_dict[k.replace('bodynet.', '')].shape)}     # number of classes might have changed
        # check which weights will be transferred
        if not pretrained_dict == state_dict:       # some changes were made
            for k in set(list(state_dict.keys()) + list(pretrained_dict.keys())):
                if k in state_dict.keys() and k not in pretrained_dict.keys():
                    print('\tWeights for "{}" were not found in weight file.'.format(k))
                elif k in pretrained_dict.keys() and k not in state_dict.keys():
                    print('\tWeights for "{}" are not part of the used model.'.format(k))
                elif state_dict[k].shape != pretrained_dict[k].shape:
                    print('\tShapes of "{}" are different in model ({}) and weight file ({}).'
                          .format(k, state_dict[k].shape, pretrained_dict[k].shape))
                else:       # everything is good
                    pass

        state_dict.update(pretrained_dict)
        bodynet.load_state_dict(state_dict)

    net = MetricNet(bodynet, len(classes))

    n_parameters = sum([p.data.nelement() for p in net.parameters() if p.requires_grad])
    if use_cuda:
        net = net.cuda()
    print('Using {}\n\t+ Number of params: {}'.format(str(net).split('(', 1)[0], n_parameters))

    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    # tensorboard summary writer
    timestamp = time.strftime('%m-%d-%H-%M')
    expname = timestamp + '_' + str(net).split('(', 1)[0]
    if exp_name is not None:
        expname = expname + '_' + exp_name
    log = TBPlotter(os.path.join(log_dir, 'tensorboard', expname))
    log.print_logdir()

    # allow auto-tuner to find best algorithm for the hardware
    cudnn.benchmark = True

    with open(label_file, 'rb') as f:
        labels = pickle.load(f)['labels']
        n_labeled = '\t'.join([str(Counter(l).items()) for l in labels.transpose()])

    write_config(argvars, os.path.join(log_dir, expname), extras={'n_labeled': n_labeled})

    # INITIALIZE TRAINING
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10, threshold=1e-1, verbose=True)

    if triplet_selector.lower() not in ['random', 'semihard', 'hardest', 'mixed', 'khardest']:
        assert False, 'Unknown option {} for triplet selector. ' \
                      'Choose from "random", "semihard", "hardest", "mixed" or "khardest".'.format(triplet_selector)
    elif triplet_selector.lower() == 'random':
        criterion = TripletLoss(margin=margin,
                                triplet_selector=RandomNegativeTripletSelector(margin, cpu=not use_cuda))
    elif triplet_selector.lower() == 'semihard' or triplet_selector.lower() == 'mixed':
        criterion = TripletLoss(margin=margin,
                                triplet_selector=SemihardNegativeTripletSelector(margin, cpu=not use_cuda))
    elif triplet_selector.lower() == 'khardest':
        criterion = TripletLoss(margin=margin,
                                triplet_selector=KHardestNegativeTripletSelector(margin, k=3, cpu=not use_cuda))
    else:
        criterion = TripletLoss(margin=margin,
                                triplet_selector=HardestNegativeTripletSelector(margin, cpu=not use_cuda))
    if use_cuda:
        criterion = criterion.cuda()

    kwargs = {'num_workers': 4} if use_cuda else {}
    multilabel_train = np.stack([trainset.df[c].values for c in classes]).transpose()
    train_batch_sampler = BalancedBatchSamplerMulticlass(multilabel_train, n_label=labels_per_class,
                                                         n_per_label=samples_per_label, ignore_label=None)
    trainloader = DataLoader(trainset, batch_sampler=train_batch_sampler, **kwargs)
    multilabel_val = np.stack([valset.df[c].values for c in classes]).transpose()
    val_batch_sampler = BalancedBatchSamplerMulticlass(multilabel_val, n_label=labels_per_class,
                                                       n_per_label=samples_per_label, ignore_label=None)
    valloader = DataLoader(valset, batch_sampler=val_batch_sampler, **kwargs)

    # optionally resume from a checkpoint
    start_epoch = 1
    if chkpt is not None:
        if os.path.isfile(chkpt):
            print("=> loading checkpoint '{}'".format(chkpt))
            checkpoint = torch.load(chkpt, map_location=lambda storage, loc: storage)
            start_epoch = checkpoint['epoch']
            best_acc_score = checkpoint['best_acc_score']
            best_acc = checkpoint['acc']
            net.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(chkpt, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(chkpt))

    def train(epoch):
        losses = AverageMeter()
        gtes = AverageMeter()
        non_zero_triplets = AverageMeter()
        distances_ap = AverageMeter()
        distances_an = AverageMeter()

        # switch to train mode
        net.train()
        for batch_idx, (data, target) in enumerate(trainloader):
            target = torch.stack(target)
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)

            # normalize features
            for i in range(len(classes)):
                outputs[i] = torch.nn.functional.normalize(outputs[i], p=2, dim=1)

            loss = Variable(torch.Tensor([0]), requires_grad=True).type_as(data[0])
            n_triplets = 0
            for op, tgt in zip(outputs, target):
                # filter unlabeled samples if there are any (have label -1)
                labeled = (tgt != -1).nonzero().view(-1)
                op, tgt = op[labeled], tgt[labeled]

                l, nt = criterion(op, tgt)
                loss += l
                n_triplets += nt

            non_zero_triplets.update(n_triplets, target[0].size(0))
            # measure GTE and record loss
            gte, dist_ap, dist_an = GTEMulticlass(outputs, target)     # do not compute ap pairs for concealed classes
            gtes.update(gte.data, target[0].size(0))
            distances_ap.update(dist_ap.data, target[0].size(0))
            distances_an.update(dist_an.data, target[0].size(0))
            losses.update(loss.data[0], target[0].size(0))

            # compute gradient and do optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{}]\t'
                      'Loss: {:.4f} ({:.4f})\t'
                      'GTE: {:.2f}% ({:.2f}%)\t'
                      'Non-zero Triplets: {:d} ({:d})'.format(
                          epoch, batch_idx * len(target[0]), len(trainloader) * len(target[0]),
                          float(losses.val), float(losses.avg),
                          float(gtes.val) * 100., float(gtes.avg) * 100.,
                          int(non_zero_triplets.val), int(non_zero_triplets.avg)))

        # log avg values to somewhere
        log.write('loss', float(losses.avg), epoch, test=False)
        log.write('gte', float(gtes.avg), epoch, test=False)
        log.write('non-zero trplts', int(non_zero_triplets.avg), epoch, test=False)
        log.write('dist_ap', float(distances_ap.avg), epoch, test=False)
        log.write('dist_an', float(distances_an.avg), epoch, test=False)

    def test(epoch):
        losses = AverageMeter()
        gtes = AverageMeter()
        non_zero_triplets = AverageMeter()
        distances_ap = AverageMeter()
        distances_an = AverageMeter()

        # switch to evaluation mode
        net.eval()
        for batch_idx, (data, target) in enumerate(valloader):
            target = torch.stack(target)
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)

            # normalize features
            for i in range(len(classes)):
                outputs[i] = torch.nn.functional.normalize(outputs[i], p=2, dim=1)

            loss = Variable(torch.Tensor([0]), requires_grad=True).type_as(data[0])
            n_triplets = 0
            for op, tgt in zip(outputs, target):
                # filter unlabeled samples if there are any (have label -1)
                labeled = (tgt != -1).nonzero().view(-1)
                op, tgt = op[labeled], tgt[labeled]
                l, nt = criterion(op, tgt)
                loss += l
                n_triplets += nt

            non_zero_triplets.update(n_triplets, target[0].size(0))
            # measure GTE and record loss
            gte, dist_ap, dist_an = GTEMulticlass(outputs, target)
            gtes.update(gte.data.cpu(), target[0].size(0))
            distances_ap.update(dist_ap.data.cpu(), target[0].size(0))
            distances_an.update(dist_an.data.cpu(), target[0].size(0))
            losses.update(loss.data[0].cpu(), target[0].size(0))

        print('\nVal set: Average loss: {:.4f} Average GTE {:.2f}%, '
              'Average non-zero triplets: {:d} LR: {:.6f}'.format(
                  float(losses.avg), float(gtes.avg) * 100.,
                  int(non_zero_triplets.avg), optimizer.param_groups[-1]['lr']))

        log.write('loss', float(losses.avg), epoch, test=True)
        log.write('gte', float(gtes.avg), epoch, test=True)
        log.write('non-zero trplts', int(non_zero_triplets.avg), epoch, test=True)
        log.write('dist_ap', float(distances_ap.avg), epoch, test=True)
        log.write('dist_an', float(distances_an.avg), epoch, test=True)
        return losses.avg, 1 - gtes.avg

    if start_epoch == 1:        # compute baseline
        _, best_acc = test(epoch=0)
    else:       # checkpoint was loaded
        best_acc = best_acc

    for epoch in range(start_epoch, epochs + 1):
        if triplet_selector.lower() == 'mixed' and epoch == 26:
            criterion.triplet_selector = HardestNegativeTripletSelector(margin, cpu=not use_cuda)
            print('Changed negative selection from semihard to hardest.')
        # train for one epoch
        train(epoch)
        # evaluate on validation set
        val_loss, val_acc = test(epoch)
        scheduler.step(val_loss)

        # remember best acc and save checkpoint
        is_best = val_acc > best_acc
        best_acc = max(val_acc, best_acc)
        save_checkpoint({
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'best_acc': best_acc,
        }, is_best, expname, directory=log_dir)

        if optimizer.param_groups[-1]['lr'] < 1e-5:
            print('Learning rate reached minimum threshold. End training.')
            break

    # report best values
    best = torch.load(os.path.join(log_dir, expname + '_model_best.pth.tar'),
                      map_location=lambda storage, loc: storage)
    print('Finished training after epoch {}:\n\tbest acc: {}'.format(best['epoch'], best['best_acc']))
    print('Best model mean accuracy: {}'.format(best_acc))
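# --- Illustrative sketch, not part of the original training code ----------------------------
# The SemihardNegativeTripletSelector used above is imported from elsewhere in the repository.
# The hypothetical function below only illustrates the selection rule the name implies: for
# each (anchor, positive) pair, pick a negative that is farther from the anchor than the
# positive but still inside the margin, i.e. d_ap < d_an < d_ap + margin. All names here are
# placeholders, not the repository's implementation.
def _semihard_triplets_sketch(embeddings, labels, margin=0.2):
    # pairwise L2 distances via ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2
    sq = torch.pow(embeddings, 2).sum(1, keepdim=True)
    dist = (sq - 2 * embeddings.mm(embeddings.t()) + sq.t()).clamp(min=0).sqrt()
    triplets = []
    for a in range(labels.size(0)):
        positives = (labels == labels[a]).nonzero().view(-1)
        negatives = (labels != labels[a]).nonzero().view(-1)
        for p in positives:
            if int(p) == a:
                continue
            d_ap = dist[a, p]
            d_an = dist[a, negatives]
            semihard = negatives[(d_an > d_ap) & (d_an < d_ap + margin)]
            if semihard.numel() > 0:
                triplets.append((a, int(p), int(semihard[0])))      # pick one semihard negative
    return triplets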
def main():
    args.task_selection = args.task_selection.split(',')
    torch.manual_seed(args.seed)

    # LOAD DATASET
    stat_file = args.stat_file
    with open(stat_file, 'r') as f:
        data = pickle.load(f)
        mean, std = data['mean'], data['std']
        mean = [float(m) for m in mean]
        std = [float(s) for s in std]
    normalize = transforms.Normalize(mean=mean, std=std)
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(90),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        normalize,
    ])

    if not args.shape_dataset:
        if args.task_selection is not None:
            classes = args.task_selection
        elif args.office_dataset:
            classes = ['style', 'genre']
        elif args.bam_dataset:
            classes = ['content', 'emotion', 'media']
        else:
            classes = ['artist_name', 'genre', 'style', 'technique', 'century']
        valset = Wikiart(path_to_info_file=args.val_file, path_to_images=args.im_path,
                         classes=classes, transform=val_transform)
        trainset = Wikiart(path_to_info_file=args.train_file, path_to_images=args.im_path,
                           classes=classes, transform=train_transform)
    else:
        if args.task_selection is not None:
            classes = args.task_selection
        else:
            classes = ['shape', 'n_shapes', 'color_shape', 'color_background']
        valset = ShapeDataset(root_dir='/export/home/kschwarz/Documents/Data/Geometric_Shapes', split='val',
                              classes=classes, transform=val_transform)
        trainset = ShapeDataset(root_dir='/export/home/kschwarz/Documents/Data/Geometric_Shapes', split='train',
                                classes=classes, transform=train_transform)

    if not trainset.labels_to_ints == valset.labels_to_ints:
        print('Validation set and training set int labels do not match. Using int conversion of trainset.')
        print(trainset.labels_to_ints, valset.labels_to_ints)
        valset.labels_to_ints = trainset.labels_to_ints.copy()

    num_labels = [len(trainset.labels_to_ints[c]) for c in classes]

    # PARAMETERS
    use_cuda = args.use_gpu and torch.cuda.is_available()
    device_nb = args.device
    if use_cuda:
        torch.cuda.set_device(device_nb)
        torch.cuda.manual_seed_all(args.seed)

    # INITIALIZE NETWORK
    if args.model.lower() not in ['mobilenet_v2', 'vgg16_bn']:
        raise NotImplementedError('Unknown Model {}\n\t+ Choose from: [mobilenet_v2, vgg16_bn].'
                                  .format(args.model))
    elif args.model.lower() == 'mobilenet_v2':
        featurenet = mobilenet_v2(pretrained=True)
    elif args.model.lower() == 'vgg16_bn':
        featurenet = vgg16_bn(pretrained=True)
    if args.not_narrow:
        bodynet = featurenet
    else:
        bodynet = narrownet(featurenet, dim_feature_out=args.feature_dim)

    net = OctopusNet(bodynet, n_labels=num_labels)

    n_parameters = sum([p.data.nelement() for p in net.parameters() if p.requires_grad])
    if use_cuda:
        net = net.cuda()
    print('Using {}\n\t+ Number of params: {}'.format(str(bodynet).split('(')[0], n_parameters))

    # LOG/SAVE OPTIONS
    log_interval = args.log_interval
    log_dir = args.log_dir
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    # tensorboard summary writer
    timestamp = time.strftime('%m-%d-%H-%M')
    if args.shape_dataset:
        expname = timestamp + '_ShapeDataset_' + str(bodynet).split('(')[0]
    else:
        expname = timestamp + '_' + str(bodynet).split('(')[0]
    if args.exp_name is not None:
        expname = expname + '_' + args.exp_name
    log = TBPlotter(os.path.join(log_dir, 'tensorboard', expname))
    log.print_logdir()

    # allow auto-tuner to find best algorithm for the hardware
    cudnn.benchmark = True

    write_config(args, os.path.join(log_dir, expname))

    # INITIALIZE TRAINING
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10, threshold=1e-1, verbose=True)
    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        criterion = criterion.cuda()

    kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
    trainloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
    valloader = DataLoader(valset, batch_size=args.batch_size, shuffle=True, **kwargs)

    # optionally resume from a checkpoint
    start_epoch = 1
    if args.chkpt is not None:
        if os.path.isfile(args.chkpt):
            print("=> loading checkpoint '{}'".format(args.chkpt))
            checkpoint = torch.load(args.chkpt, map_location=lambda storage, loc: storage)
            start_epoch = checkpoint['epoch']
            best_acc_score = checkpoint['best_acc_score']
            best_acc = checkpoint['acc']
            net.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.chkpt, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.chkpt))

    def train(epoch):
        losses = AverageMeter()
        accs = AverageMeter()
        class_acc = [AverageMeter() for i in range(len(classes))]

        # switch to train mode
        net.train()
        for batch_idx, (data, target) in enumerate(trainloader):
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)
            preds = [torch.max(outputs[i], 1)[1] for i in range(len(classes))]

            loss = Variable(torch.Tensor([0])).type_as(data[0])
            for i, o, t, p in zip(range(len(classes)), outputs, target, preds):
                # in case of None labels
                mask = t != -1
                if mask.sum() == 0:
                    continue
                o, t, p = o[mask], t[mask], p[mask]
                loss += criterion(o, t)

                # measure class accuracy and record loss
                class_acc[i].update((torch.sum(p == t).type(torch.FloatTensor) / t.size(0)).data)
            accs.update(torch.mean(torch.stack([class_acc[i].val for i in range(len(classes))])), target[0].size(0))
            losses.update(loss.data, target[0].size(0))

            # compute gradient and do optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{}]\t'
                      'Loss: {:.4f} ({:.4f})\t'
                      'Acc: {:.2f}% ({:.2f}%)'.format(
                          epoch, batch_idx * len(target), len(trainloader.dataset),
                          float(losses.val), float(losses.avg),
                          float(accs.val) * 100., float(accs.avg) * 100.))
                print('\t' + '\n\t'.join(['{}: {:.2f}%'.format(classes[i], float(class_acc[i].val) * 100.)
                                          for i in range(len(classes))]))

        # log avg values to somewhere
        log.write('loss', float(losses.avg), epoch, test=False)
        log.write('acc', float(accs.avg), epoch, test=False)
        for i in range(len(classes)):
            log.write('class_acc', float(class_acc[i].avg), epoch, test=False)

    def test(epoch):
        losses = AverageMeter()
        accs = AverageMeter()
        class_acc = [AverageMeter() for i in range(len(classes))]

        # switch to evaluation mode
        net.eval()
        for batch_idx, (data, target) in enumerate(valloader):
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)
            preds = [torch.max(outputs[i], 1)[1] for i in range(len(classes))]

            loss = Variable(torch.Tensor([0])).type_as(data[0])
            for i, o, t, p in zip(range(len(classes)), outputs, target, preds):
                # in case of None labels
                mask = t != -1
                if mask.sum() == 0:
                    continue
                o, t, p = o[mask], t[mask], p[mask]
                loss += criterion(o, t)

                # measure class accuracy and record loss
                class_acc[i].update((torch.sum(p == t).type(torch.FloatTensor) / t.size(0)).data)
            accs.update(torch.mean(torch.stack([class_acc[i].val for i in range(len(classes))])), target[0].size(0))
            losses.update(loss.data, target[0].size(0))

        # compute mean - std/mean as measure for accuracy
        score = accs.avg - torch.std(torch.stack([class_acc[i].avg for i in range(len(classes))])) / accs.avg

        print('\nVal set: Average loss: {:.4f} Average acc {:.2f}% Acc score {:.2f} LR: {:.6f}'
              .format(float(losses.avg), float(accs.avg) * 100., float(score), optimizer.param_groups[-1]['lr']))
        print('\t' + '\n\t'.join(['{}: {:.2f}%'.format(classes[i], float(class_acc[i].avg) * 100.)
                                  for i in range(len(classes))]))
        log.write('loss', float(losses.avg), epoch, test=True)
        log.write('acc', float(accs.avg), epoch, test=True)
        for i in range(len(classes)):
            log.write('class_acc', float(class_acc[i].avg), epoch, test=True)
        return losses.avg.cpu().numpy(), float(score), float(accs.avg), [float(class_acc[i].avg) for i in range(len(classes))]

    if start_epoch == 1:        # compute baseline
        _, best_acc_score, best_acc, _ = test(epoch=0)
    else:       # checkpoint was loaded
        best_acc_score = best_acc_score
        best_acc = best_acc

    for epoch in range(start_epoch, args.epochs + 1):
        # train for one epoch
        train(epoch)
        # evaluate on validation set
        val_loss, val_acc_score, val_acc, val_class_accs = test(epoch)
        scheduler.step(val_loss)

        # remember best acc and save checkpoint
        is_best = val_acc_score > best_acc_score
        best_acc_score = max(val_acc_score, best_acc_score)
        save_checkpoint({
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
            'best_acc_score': best_acc_score,
            'acc': val_acc,
            'class_acc': {c: a for c, a in zip(classes, val_class_accs)}
        }, is_best, expname, directory=log_dir)
        if val_acc > best_acc:
            shutil.copyfile(os.path.join(log_dir, expname + '_checkpoint.pth.tar'),
                            os.path.join(log_dir, expname + '_model_best_mean_acc.pth.tar'))
        best_acc = max(val_acc, best_acc)

        if optimizer.param_groups[-1]['lr'] < 1e-5:
            print('Learning rate reached minimum threshold. End training.')
            break

    # report best values
    try:
        best = torch.load(os.path.join(log_dir, expname + '_model_best.pth.tar'),
                          map_location=lambda storage, loc: storage)
    except IOError:         # could be only one task
        best = torch.load(os.path.join(log_dir, expname + '_model_best_mean_acc.pth.tar'),
                          map_location=lambda storage, loc: storage)

    print('Finished training after epoch {}:\n\tbest acc score: {}\n\tacc: {}\n\tclass acc: {}'
          .format(best['epoch'], best['best_acc_score'], best['acc'], best['class_acc']))
    print('Best model mean accuracy: {}'.format(best_acc))

    try:
        shutil.copyfile(os.path.join(log_dir, expname + '_model_best.pth.tar'),
                        os.path.join('models', expname + '_model_best.pth.tar'))
    except IOError:         # could be only one task
        shutil.copyfile(os.path.join(log_dir, expname + '_model_best_mean_acc.pth.tar'),
                        os.path.join('models', expname + '_model_best.pth.tar'))
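# --- Illustrative sketch, not part of the original script -----------------------------------
# OctopusNet is imported from elsewhere in the repository. Judging from its use above
# (net(data) returns one output per entry in classes, and it is built from a shared bodynet
# plus n_labels=[...]), it is presumably a shared feature extractor with one classification
# head per task, roughly like the hypothetical module below (feature_dim is an assumption):
class _MultiHeadSketch(nn.Module):
    def __init__(self, bodynet, n_labels, feature_dim=256):
        super(_MultiHeadSketch, self).__init__()
        self.bodynet = bodynet
        self.heads = nn.ModuleList([nn.Linear(feature_dim, n) for n in n_labels])

    def forward(self, x):
        features = self.bodynet(x)                              # shared representation
        return [head(features) for head in self.heads]          # one logit vector per task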
def train_multiclass(train_file, test_file, stat_file,
                     model='mobilenet_v2',
                     classes=('artist_name', 'genre', 'style', 'technique', 'century'),
                     im_path='/export/home/kschwarz/Documents/Data/Wikiart_artist49_images',
                     label_file='_user_labels.pkl',
                     chkpt=None, weight_file=None,
                     use_gpu=True, device=0,
                     epochs=100, batch_size=32, lr=1e-4, momentum=0.9,
                     log_interval=10, log_dir='runs', exp_name=None, seed=123):
    argvars = locals().copy()
    torch.manual_seed(seed)

    # LOAD DATASET
    with open(stat_file, 'r') as f:
        data = pickle.load(f)
        mean, std = data['mean'], data['std']
        mean = [float(m) for m in mean]
        std = [float(s) for s in std]
    normalize = transforms.Normalize(mean=mean, std=std)
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(90),
        transforms.ToTensor(),
        normalize,
    ])
    val_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.ToTensor(),
        normalize,
    ])
    if model.lower() == 'inception_v3':         # change input size to 299
        train_transform.transforms[0].size = (299, 299)
        val_transform.transforms[0].size = (299, 299)

    trainset = create_trainset(train_file, label_file, im_path, train_transform, classes)
    valset = create_valset(test_file, im_path, val_transform, trainset.labels_to_ints)
    num_labels = [len(trainset.labels_to_ints[c]) for c in classes]

    # PARAMETERS
    use_cuda = use_gpu and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(device)
        torch.cuda.manual_seed_all(seed)

    if model.lower() not in ['squeezenet', 'mobilenet_v1', 'mobilenet_v2', 'vgg16_bn', 'inception_v3', 'alexnet']:
        assert False, 'Unknown model {}\n\t+ Choose from: ' \
                      '[squeezenet, mobilenet_v1, mobilenet_v2, vgg16_bn, inception_v3, alexnet].'.format(model)
    elif model.lower() == 'mobilenet_v1':
        bodynet = mobilenet_v1(pretrained=weight_file is None)
    elif model.lower() == 'mobilenet_v2':
        bodynet = mobilenet_v2(pretrained=weight_file is None)
    elif model.lower() == 'vgg16_bn':
        bodynet = vgg16_bn(pretrained=weight_file is None)
    elif model.lower() == 'inception_v3':
        bodynet = inception_v3(pretrained=weight_file is None)
    elif model.lower() == 'alexnet':
        bodynet = alexnet(pretrained=weight_file is None)
    else:       # squeezenet
        bodynet = squeezenet(pretrained=weight_file is None)

    # Load weights for the body network
    if weight_file is not None:
        print("=> loading weights from '{}'".format(weight_file))
        pretrained_dict = torch.load(weight_file, map_location=lambda storage, loc: storage)['state_dict']
        state_dict = bodynet.state_dict()
        pretrained_dict = {k.replace('bodynet.', ''): v for k, v in pretrained_dict.items()     # in case of multilabel weight file
                           if (k.replace('bodynet.', '') in state_dict.keys()
                               and v.shape == state_dict[k.replace('bodynet.', '')].shape)}     # number of classes might have changed
        # check which weights will be transferred
        if not pretrained_dict == state_dict:       # some changes were made
            for k in set(list(state_dict.keys()) + list(pretrained_dict.keys())):
                if k in state_dict.keys() and k not in pretrained_dict.keys():
                    print('\tWeights for "{}" were not found in weight file.'.format(k))
                elif k in pretrained_dict.keys() and k not in state_dict.keys():
                    print('\tWeights for "{}" are not part of the used model.'.format(k))
                elif state_dict[k].shape != pretrained_dict[k].shape:
                    print('\tShapes of "{}" are different in model ({}) and weight file ({}).'
                          .format(k, state_dict[k].shape, pretrained_dict[k].shape))
                else:       # everything is good
                    pass

        state_dict.update(pretrained_dict)
        bodynet.load_state_dict(state_dict)

    net = OctopusNet(bodynet, n_labels=num_labels)

    n_parameters = sum([p.data.nelement() for p in net.parameters() if p.requires_grad])
    if use_cuda:
        net = net.cuda()
    print('Using {}\n\t+ Number of params: {}'.format(str(net).split('(', 1)[0], n_parameters))

    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)

    # tensorboard summary writer
    timestamp = time.strftime('%m-%d-%H-%M')
    expname = timestamp + '_' + str(net).split('(', 1)[0]
    if exp_name is not None:
        expname = expname + '_' + exp_name
    log = TBPlotter(os.path.join(log_dir, 'tensorboard', expname))
    log.print_logdir()

    # allow auto-tuner to find best algorithm for the hardware
    cudnn.benchmark = True

    with open(label_file, 'rb') as f:
        labels = pickle.load(f)['labels']
        n_labeled = '\t'.join([str(Counter(l).items()) for l in labels.transpose()])

    write_config(argvars, os.path.join(log_dir, expname), extras={'n_labeled': n_labeled})

    # INITIALIZE TRAINING
    optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=10, threshold=1e-1, verbose=True)
    criterion = nn.CrossEntropyLoss()
    if use_cuda:
        criterion = criterion.cuda()

    kwargs = {'num_workers': 4} if use_cuda else {}
    trainloader = DataLoader(trainset, batch_size=batch_size, shuffle=True, **kwargs)
    valloader = DataLoader(valset, batch_size=batch_size, shuffle=True, **kwargs)

    # optionally resume from a checkpoint
    start_epoch = 1
    if chkpt is not None:
        if os.path.isfile(chkpt):
            print("=> loading checkpoint '{}'".format(chkpt))
            checkpoint = torch.load(chkpt, map_location=lambda storage, loc: storage)
            start_epoch = checkpoint['epoch']
            best_acc_score = checkpoint['best_acc_score']
            best_acc = checkpoint['acc']
            net.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(chkpt, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(chkpt))

    def train(epoch):
        losses = AverageMeter()
        accs = AverageMeter()
        class_acc = [AverageMeter() for i in range(len(classes))]

        # switch to train mode
        net.train()
        for batch_idx, (data, target) in enumerate(trainloader):
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)
            preds = [torch.max(outputs[i], 1)[1] for i in range(len(classes))]

            loss = Variable(torch.Tensor([0]), requires_grad=True).type_as(data[0])
            for i, o, t, p in zip(range(len(classes)), outputs, target, preds):
                # filter unlabeled samples if there are any (have label -1)
                labeled = (t != -1).nonzero().view(-1)
                o, t, p = o[labeled], t[labeled], p[labeled]
                loss += criterion(o, t)

                # measure class accuracy and record loss
                class_acc[i].update((torch.sum(p == t).type(torch.FloatTensor) / t.size(0)).data)
            accs.update(torch.mean(torch.stack([class_acc[i].val for i in range(len(classes))])), target[0].size(0))
            losses.update(loss.data, target[0].size(0))

            # compute gradient and do optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % log_interval == 0:
                print('Train Epoch: {} [{}/{}]\t'
                      'Loss: {:.4f} ({:.4f})\t'
                      'Acc: {:.2f}% ({:.2f}%)'.format(
                          epoch, batch_idx * len(target), len(trainloader.dataset),
                          float(losses.val), float(losses.avg),
                          float(accs.val) * 100., float(accs.avg) * 100.))
                print('\t' + '\n\t'.join(['{}: {:.2f}%'.format(classes[i], float(class_acc[i].val) * 100.)
                                          for i in range(len(classes))]))

        # log avg values to somewhere
        log.write('loss', float(losses.avg), epoch, test=False)
        log.write('acc', float(accs.avg), epoch, test=False)
        for i in range(len(classes)):
            log.write('class_acc', float(class_acc[i].avg), epoch, test=False)

    def test(epoch):
        losses = AverageMeter()
        accs = AverageMeter()
        class_acc = [AverageMeter() for i in range(len(classes))]

        # switch to evaluation mode
        net.eval()
        for batch_idx, (data, target) in enumerate(valloader):
            if use_cuda:
                data, target = Variable(data.cuda()), [Variable(t.cuda()) for t in target]
            else:
                data, target = Variable(data), [Variable(t) for t in target]

            # compute output
            outputs = net(data)
            preds = [torch.max(outputs[i], 1)[1] for i in range(len(classes))]

            loss = Variable(torch.Tensor([0]), requires_grad=True).type_as(data[0])
            for i, o, t, p in zip(range(len(classes)), outputs, target, preds):
                # filter unlabeled samples if there are any (have label -1)
                labeled = (t != -1).nonzero().view(-1)
                loss += criterion(o[labeled], t[labeled])

                # measure class accuracy and record loss
                class_acc[i].update((torch.sum(p[labeled] == t[labeled]).type(torch.FloatTensor)
                                     / t[labeled].size(0)).data)
            accs.update(torch.mean(torch.stack([class_acc[i].val for i in range(len(classes))])), target[0].size(0))
            losses.update(loss.data, target[0].size(0))

        # compute mean - std/mean as measure for accuracy
        score = accs.avg - torch.std(torch.stack([class_acc[i].avg for i in range(len(classes))])) / accs.avg

        print('\nVal set: Average loss: {:.4f} Average acc {:.2f}% Acc score {:.2f} LR: {:.6f}'
              .format(float(losses.avg), float(accs.avg) * 100., float(score), optimizer.param_groups[-1]['lr']))
        print('\t' + '\n\t'.join(['{}: {:.2f}%'.format(classes[i], float(class_acc[i].avg) * 100.)
                                  for i in range(len(classes))]))
        log.write('loss', float(losses.avg), epoch, test=True)
        log.write('acc', float(accs.avg), epoch, test=True)
        for i in range(len(classes)):
            log.write('class_acc', float(class_acc[i].avg), epoch, test=True)
        return losses.avg.cpu().numpy(), float(score), float(accs.avg), [float(class_acc[i].avg) for i in range(len(classes))]

    if start_epoch == 1:        # compute baseline
        _, best_acc_score, best_acc, _ = test(epoch=0)
    else:       # checkpoint was loaded
        best_acc_score = best_acc_score
        best_acc = best_acc

    for epoch in range(start_epoch, epochs + 1):
        # train for one epoch
        train(epoch)
        # evaluate on validation set
        val_loss, val_acc_score, val_acc, val_class_accs = test(epoch)
        scheduler.step(val_loss)

        # remember best acc and save checkpoint
        is_best = val_acc_score > best_acc_score
        best_acc_score = max(val_acc_score, best_acc_score)
        save_checkpoint({
            'epoch': epoch,
            'state_dict': net.state_dict(),
            'best_acc_score': best_acc_score,
            'acc': val_acc,
            'class_acc': {c: a for c, a in zip(classes, val_class_accs)}
        }, is_best, expname, directory=log_dir)
        if val_acc > best_acc:
            shutil.copyfile(os.path.join(log_dir, expname + '_checkpoint.pth.tar'),
                            os.path.join(log_dir, expname + '_model_best_mean_acc.pth.tar'))
        best_acc = max(val_acc, best_acc)

        if optimizer.param_groups[-1]['lr'] < 1e-5:
            print('Learning rate reached minimum threshold. End training.')
            break

    # report best values
    best = torch.load(os.path.join(log_dir, expname + '_model_best.pth.tar'),
                      map_location=lambda storage, loc: storage)
    print('Finished training after epoch {}:\n\tbest acc score: {}\n\tacc: {}\n\tclass acc: {}'
          .format(best['epoch'], best['best_acc_score'], best['acc'], best['class_acc']))
    print('Best model mean accuracy: {}'.format(best_acc))
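# --- Illustrative usage, not part of the original script ------------------------------------
# A hypothetical call with placeholder file names, assuming the info files and the image
# mean/std statistics pickle have been prepared beforehand:
#
#     train_multiclass(train_file='info_artist_train.hdf5',      # placeholder path
#                      test_file='info_artist_test.hdf5',        # placeholder path
#                      stat_file='wikiart_mean_std.pkl',          # placeholder path
#                      model='mobilenet_v2',
#                      classes=('artist_name', 'genre', 'style', 'technique', 'century'),
#                      use_gpu=True, device=0, epochs=100, batch_size=32, lr=1e-4,
#                      exp_name='wikiart_multiclass')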