def load_image(fileList, transforms, shuffle=True, pin_memory=True):
    loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=fileList, transform=transforms),
        batch_size=args.batch_size,
        shuffle=shuffle,
        num_workers=args.workers,
        pin_memory=pin_memory)
    return loader
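# `ImageList` is used throughout these scripts but not defined in this file.
# A minimal sketch of what such a dataset might look like, assuming each line
# of fileList is "relative/path label" (hypothetical -- the real class may
# read a different record format or default loader):
import os
from PIL import Image
import torch.utils.data as data

class ImageListSketch(data.Dataset):
    def __init__(self, root, fileList, transform=None, loader=None):
        self.root = root
        self.transform = transform
        # default to grayscale decoding, matching the Light CNN pipelines below
        self.loader = loader or (lambda p: Image.open(p).convert('L'))
        with open(fileList) as f:
            self.samples = [(parts[0], int(parts[1]))
                            for parts in (line.split() for line in f)]

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, index):
        path, label = self.samples[index]
        img = self.loader(os.path.join(self.root, path))
        if self.transform is not None:
            img = self.transform(img)
        return img, label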
def load_data_set(batch_size=20, split='train'):
    # `split` selects the file list (train/val/test); renamed from `set`
    # to avoid shadowing the builtin
    root_path = '/Users/benjaminchew/Documents/GitHub/DeepPotatoVision/'
    data_list = root_path + split + '.txt'
    data_loader = torch.utils.data.DataLoader(
        ImageList(root=root_path,
                  fileList=data_list,
                  transform=transforms.Compose([
                      transforms.ToTensor(),
                      transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225]),
                  ]),
                  loader=rgb_loader),
        batch_size=batch_size,
        shuffle=True,
        #num_workers=args.workers,
        pin_memory=True)
    return data_loader
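# `rgb_loader` is passed in above but not defined here. A plausible sketch,
# assuming it simply forces three-channel decoding (a common pattern; the
# real helper may differ):
from PIL import Image

def rgb_loader(path):
    # open the file and convert to RGB so grayscale/palette images get 3 channels
    return Image.open(path).convert('RGB')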
def main():
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    model = LightCNN(pretrained=False, num_classes=args.num_classes)
    if args.cuda:
        model = torch.nn.DataParallel(model).cuda()
    print(model)

    # larger lr for the last fc layer; biases get double lr and no weight decay
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 20 * args.lr, 'weight_decay': 0}]
            else:
                params += [{'params': value, 'lr': 2 * args.lr, 'weight_decay': 0}]
        else:
            if 'fc2' in name:
                params += [{'params': value, 'lr': 10 * args.lr}]
            else:
                params += [{'params': value, 'lr': 1 * args.lr}]

    optimizer = torch.optim.SGD(params, args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # load images
    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.train_list,
                  transform=transforms.Compose([
                      transforms.RandomCrop(128),
                      transforms.RandomHorizontalFlip(),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size,
        shuffle=False,  # note: training data is usually shuffled; False may be intentional here
        num_workers=args.workers, pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.val_list,
                  transform=transforms.Compose([
                      transforms.CenterCrop(128),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function
    criterion = nn.CrossEntropyLoss()
    if args.cuda:
        criterion.cuda()

    # baseline validation before training starts
    validate(val_loader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        save_name = args.save_path + 'lightCNN_' + str(epoch + 1) + '_checkpoint.pth.tar'
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'prec1': prec1,
            'optimizer': optimizer.state_dict(),
        }, save_name)
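# `adjust_learning_rate` and `save_checkpoint` are called above but defined
# elsewhere. Minimal sketches of the usual versions in scripts like this,
# assuming a step decay (hypothetical names and constants; the real schedule
# may differ):
import torch

def adjust_learning_rate_sketch(optimizer, epoch, step=10):
    # divide every param group's lr by 10 once every `step` epochs
    if epoch != 0 and epoch % step == 0:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.1

def save_checkpoint_sketch(state, filename):
    # serialize the state dict (epoch, weights, optimizer, ...) to disk
    torch.save(state, filename)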
#img = img * 128
#mean = torch.FloatTensor([27])
#img = img + mean
img = ndimage.gaussian_filter(img, sigma=2)
# note: scipy.misc.imresize was removed in SciPy 1.3 (see the sketch below)
img = misc.imresize(img, (180, 180))  #/255
#img = img / img.max()
#img = Image.fromarray(np.uint8(img * 255), 'L')
img = Image.fromarray(np.uint8(img), 'L')
#img = ImageOps.invert(img)
img.save("/home/iimtech5/Deep_Net/result/" + "out_{:04d}.png".format(count))
count += 1

test_loader = torch.utils.data.DataLoader(
    ImageList(root=args.root_path, fileList=args.test_list, transform=None),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)

model = DeepNet()
# the original literal path './Model_Save_new/args.model_args.dataset_5_checkpoint.pth.tar'
# never interpolated args.model/args.dataset; format them in explicitly
model.load_state_dict(
    torch.load('./Model_Save_new/{}_{}_5_checkpoint.pth.tar'.format(
        args.model, args.dataset))['state_dict'])
model = model.to(device)
print(model)
print('model loaded and test started')
#model.eval()  # usually required before inference so BatchNorm/Dropout switch to eval mode
test(test_loader, model)
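# An approximate drop-in replacement for the deprecated misc.imresize call
# above, using PIL directly. misc.imresize took (height, width) while PIL's
# resize takes (width, height), hence size[::-1]; note misc.imresize also
# rescaled intensities by default, which this sketch omits:
import numpy as np
from PIL import Image

def imresize_sketch(arr, size):
    # cast to uint8 and resize with bilinear interpolation, returning an array
    return np.array(Image.fromarray(np.uint8(arr)).resize(size[::-1], Image.BILINEAR))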
def main():
    global args
    args = parser.parse_args()

    model = LightCNN_9Layers(num_classes=args.num_classes)
    if args.cuda:
        model = model.cuda()
    print(model)

    #optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # load images
    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.train_list,
                  transform=transforms.Compose([
                      transforms.RandomCrop((202, 162)),
                      transforms.RandomHorizontalFlip(),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.val_list,
                  transform=transforms.Compose([
                      transforms.CenterCrop((202, 162)),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # define loss function and optimizer
    #criterion = nn.CrossEntropyLoss()
    #criterion = nn.L1Loss()
    #criterion = nn.MSELoss()
    criterion = CombineLoss()
    if args.cuda:
        criterion.cuda()

    # track the best (lowest) validation loss seen so far
    best_loss = validate(val_loader, model, criterion)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        validloss = validate(val_loader, model, criterion)

        # only save checkpoints that improve the validation loss
        if validloss < best_loss:
            best_loss = validloss
            save_name = args.save_path + 'lightCNN_' + str(epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
            }, save_name)
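# `CombineLoss` is defined elsewhere. Given the commented-out L1/MSE
# alternatives above, a plausible sketch is a weighted sum of the two
# (hypothetical weights and terms; the real class may combine different losses):
import torch.nn as nn

class CombineLossSketch(nn.Module):
    def __init__(self, alpha=0.5):
        super().__init__()
        self.alpha = alpha  # assumed mixing weight between L1 and MSE
        self.l1 = nn.L1Loss()
        self.mse = nn.MSELoss()

    def forward(self, output, target):
        return (self.alpha * self.l1(output, target)
                + (1 - self.alpha) * self.mse(output, target))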
val_loader = torch.utils.data.DataLoader(
    ImageList(root=args.root_path, fileList=args.val_list, type_of_data="val",
              transform=transforms.Compose([
                  #transforms.RandomHorizontalFlip(),
                  #transforms.ToTensor(),
                  #transforms.Normalize(mean=[0.459, 0.428, 0.389]),
              ])),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)

"""
train_loader = torch.utils.data.DataLoader(
    ImageList(root=args.root_path, fileList=args.train_list,
              type_of_data="train", transform=None),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)

val_loader = torch.utils.data.DataLoader(
    ImageList(root=args.root_path, fileList=args.val_list,
              type_of_data="val", transform=None),
    batch_size=args.batch_size, shuffle=False,
    num_workers=args.workers, pin_memory=True)
def main():
    global args
    args = parser.parse_args()

    if args.model == 'LightCNN-9':
        model = LightCNN_9Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        print('Error: unknown model type\n')

    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print('Device being used is: ' + str(device))

    #model = torch.nn.DataParallel(model).to(device)
    model = model.to(device)
    DFWmodel = DFW().to(device)

    if args.pretrained:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # compare device.type (a string), not the device object, to 'cpu'
            if device.type == 'cpu':
                state_dict = torch.load(args.resume, map_location='cpu')['state_dict']
            else:
                state_dict = torch.load(
                    args.resume,
                    map_location=lambda storage, loc: storage)['state_dict']
            # strip the 'module.' prefix left over from DataParallel checkpoints
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                name = k[7:] if k[:7] == 'module.' else k
                new_state_dict[name] = v
            model.load_state_dict(new_state_dict, strict=True)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # load images
    train_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.train_list,
                  transform=transforms.Compose([
                      transforms.Resize((128, 128)),
                      #transforms.Resize((144, 144)),
                      #transforms.FiveCrop((128, 128)),
                      #transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
                      transforms.RandomHorizontalFlip(),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    val_loader = torch.utils.data.DataLoader(
        ImageList(root=args.root_path, fileList=args.val_list,
                  transform=transforms.Compose([
                      transforms.Resize((128, 128)),
                      transforms.ToTensor(),
                  ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # (debug) inspect parameters:
    #for param in model.named_parameters():
    #    print(param)

    # freeze everything except the fc layer (fc2 stays frozen as well)
    for name, param in model.named_parameters():
        if 'fc' in name and 'fc2' not in name:
            param.requires_grad = True
        else:
            param.requires_grad = False

    # (debug) confirm which parameters are trainable:
    #for name, param in model.named_parameters():
    #    print(name, param.requires_grad)

    # learnable parameters: the fc layer of LightCNN plus the DFW model
    params = list(model.fc.parameters()) + list(DFWmodel.parameters())
    optimizer = optim.SGD(params, lr=args.lr, momentum=args.momentum)
    #optimizer = optim.Adam(params, lr=args.lr)

    #criterion = ContrastiveLoss(margin=1.0).to(device)
    criterion = nn.BCELoss()

    for epoch in range(args.start_epoch, args.epochs):
        #adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, DFWmodel, criterion, optimizer, epoch, device)
        # evaluate on validation set
        acc = validate(val_loader, model, DFWmodel, criterion, epoch, device)

        if epoch % 10 == 0:
            save_name = args.save_path + 'lightCNN_' + str(epoch + 1) + '_checkpoint.pth.tar'
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'acc': acc,
                'optimizer': optimizer.state_dict(),
            }, save_name)
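# `DFW` is defined elsewhere. Since training uses nn.BCELoss, a plausible
# sketch is a small verification head mapping a pair of face embeddings to a
# match probability (entirely hypothetical -- the real module may be
# structured very differently):
import torch
import torch.nn as nn

class DFWSketch(nn.Module):
    def __init__(self, embed_dim=256):
        super().__init__()
        # assumed: concatenated embedding pair -> single sigmoid score
        self.head = nn.Sequential(
            nn.Linear(2 * embed_dim, 128),
            nn.ReLU(inplace=True),
            nn.Linear(128, 1),
            nn.Sigmoid(),  # BCELoss expects probabilities in [0, 1]
        )

    def forward(self, feat_a, feat_b):
        return self.head(torch.cat([feat_a, feat_b], dim=1)).squeeze(1)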
def main():
    global args, best_prec1, seed
    args = parser.parse_args()
    #os.environ['CUDA_VISIBLE_DEVICES'] = ', '.join(str(x) for x in args.gpu)

    model = models.ResNet(depth=args.depth, pretrained=args.pretrained,
                          cut_at_pooling=False, num_features=0, norm=False,
                          dropout=0, num_classes=2)
    # # create model from the torchvision model zoo instead:
    # if args.pretrained:
    #     print("=> using pre-trained model '{}'".format(args.arch))
    #     model = models.__dict__[args.arch](pretrained=True)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda:
        # move the model to the GPU so all operations run on CUDA
        model = model.cuda()
    # on CPU the model is used as constructed; the original `model = model()`
    # would have called the instance and raised an error

    print('Loading data from ' + args.root_path + args.trainFile)

    # data-augmentation policy selected by args.da. Note that `deg` is drawn
    # once per run, so it only randomizes the rotation *bound*;
    # RandomRotation(deg) still samples a fresh angle in [-deg, deg] per image.
    # The order seriously matters: RandomHorizontalFlip, then ToTensor, then Normalize.
    if args.da == 0:
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomHorizontalFlip(),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    elif args.da == 1:
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomHorizontalFlip(),
                          transforms.ColorJitter(),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    elif args.da == 2:
        deg = random.random() * 10
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomHorizontalFlip(),
                          transforms.ColorJitter(),
                          transforms.RandomRotation(deg),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    elif args.da == 3:
        deg = random.random() * 10
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomHorizontalFlip(),
                          transforms.RandomRotation(deg),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    elif args.da == 4:
        deg = random.random() * 20
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomHorizontalFlip(),
                          transforms.ColorJitter(),
                          transforms.RandomRotation(deg),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    elif args.da == 5:
        deg = random.random() * 10
        train_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.trainFile,
                      transform=transforms.Compose([
                          transforms.RandomCrop(114),
                          transforms.RandomHorizontalFlip(),
                          transforms.RandomRotation(deg),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.batch_size, shuffle=True,
            num_workers=args.workers, pin_memory=True)
    else:
        pass

    print('Loading data from ' + args.root_path + args.testFile)
    if args.da == 5:
        val_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.testFile,
                      transform=transforms.Compose([
                          transforms.CenterCrop(114),
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.test_batch_size,
            shuffle=False,  # shuffle must stay off for face recognition on LFW
            num_workers=args.workers, pin_memory=True)
    else:
        val_loader = torch.utils.data.DataLoader(
            ImageList(root=args.root_path, fileList=args.root_path + args.testFile,
                      transform=transforms.Compose([
                          transforms.ToTensor(),
                          transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                      ])),
            batch_size=args.test_batch_size,
            shuffle=False,  # shuffle must stay off for face recognition on LFW
            num_workers=args.workers, pin_memory=True)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # let cuDNN auto-tune convolution algorithms for this hardware
    cudnn.benchmark = True

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    result_file = (args.root_path + 'results_ResNet' + str(args.depth) +
                   '_DA' + str(args.da) + '_LR_' + str(args.lr) + '.txt')
    fp = open(result_file, "a")
    fp.write(str(seed) + '\n')
    fp.close()

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)  # the learning rate is divided by 10 every 10 epochs

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)
        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        fp = open(result_file, "a")
        fp.write('{0:.3f} \n'.format(prec1))
        if epoch == args.epochs - 1:
            fp.write('\n \n \n')
        fp.close()

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
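# The six args.da branches above differ only in their transform lists. A
# possible refactor (a sketch, not the original code) builds the pipeline
# from a table so a single DataLoader construction suffices:
import random
import torchvision.transforms as transforms

def build_train_transform(da):
    # rotation bound is drawn once per run, as in the original branches
    deg = random.random() * (20 if da == 4 else 10)
    ops = {
        0: [transforms.RandomHorizontalFlip()],
        1: [transforms.RandomHorizontalFlip(), transforms.ColorJitter()],
        2: [transforms.RandomHorizontalFlip(), transforms.ColorJitter(),
            transforms.RandomRotation(deg)],
        3: [transforms.RandomHorizontalFlip(), transforms.RandomRotation(deg)],
        4: [transforms.RandomHorizontalFlip(), transforms.ColorJitter(),
            transforms.RandomRotation(deg)],
        5: [transforms.RandomCrop(114), transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(deg)],
    }[da]
    # shared tail: spatial/color ops first, then ToTensor, then Normalize
    return transforms.Compose(ops + [
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])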