def train():
    """Train a single-label classifier on the Tiangong dataset with RMSprop.

    All hyper-parameters come from the module-level ``args`` namespace.
    Checkpoints are written to ``weights/`` every 2000 optimizer steps.
    Runs (practically) forever; stop externally once converged.
    """
    # Priors: run everything on GPU 0; newly created tensors default to CUDA floats.
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    # Dataset
    Dataset = Tiangong(root=args.dataset_root, mode='trainval')
    Dataloader = data.DataLoader(Dataset, args.batch_size,
                                 num_workers=args.num_workers,
                                 shuffle=True, pin_memory=True)

    def _init_head_from_pretrained(network, weights_path):
        # Load ImageNet weights, drop the classifier head (its shape does not
        # match args.class_num), then re-initialise the new head.
        state_dict = torch.load(weights_path)
        state_dict.pop('last_linear.bias')
        state_dict.pop('last_linear.weight')
        network.load_state_dict(state_dict, strict=False)
        init.xavier_uniform_(network.last_linear.weight.data)
        network.last_linear.bias.data.zero_()

    # Network selection. NOTE(review): DataParallel was already commented out
    # in the original; `net` aliases the bare module.
    if args.basenet == 'ResNeXt':
        Network = ResNeXt101_64x4d(args.class_num)
        net = Network
        cudnn.benchmark = True
        if args.resume:
            Network.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(Network, 'resnext101_64x4d-e77a0586.pth')
    elif args.basenet == 'pnasnet':
        Network = pnasnet5large(args.class_num, args.resume)
        net = Network
        cudnn.benchmark = True
        if args.resume:
            Network.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(Network, 'pnasnet5large-bf079911.pth')

    net.train()
    net = net.cuda()
    cl = nn.CrossEntropyLoss()

    # Optimizer (SGD variant kept out; RMSprop was the active choice).
    Optimizer = optim.RMSprop(net.parameters(), lr=args.lr,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)

    # Train: step-based schedule; the epoch count is effectively "until killed".
    step = args.start_iter
    for _ in range(10000):
        for imgs, anos in Dataloader:
            y = net(imgs.cuda())
            Optimizer.zero_grad()
            loss = cl(y, anos.cuda())
            loss.backward()
            Optimizer.step()

            if step % 10 == 0:
                # .item() yields a detached Python float for logging instead
                # of the raw tensor wrapper.
                print('step: ' + str(step) + ', loss: ' + repr(loss.item()))
            step += 1
            if step == args.adjust_iter:
                adjust_learning_rate(Optimizer, args.gamma)
            if step % 2000 == 0:
                torch.save(
                    Network.state_dict(),
                    'weights/' + 'Tiangong_RMSProp' + args.basenet
                    + repr(step) + '.pth')
def main():
    """Build the H5 dataset/model selected by ``args`` and train for epochs.

    Supports single-label (CrossEntropyLoss) and multi-label (SoftMarginLoss)
    training; the backbone is chosen via ``args.basenet``.
    """
    # Resampling indices are only consumed by the multi-label dataset variant.
    idxs = np.load('AnalysisTools/MulResampleIndex.npy')
    if args.MultiLabel is None:
        Dataset_train = H5Dataset(root=args.dataset_root, mode='training')
    else:
        Dataset_train = H5DatasetTensorAnno(root=args.dataset_root,
                                            mode='training', indices=idxs)
    Dataloader_train = data.DataLoader(Dataset_train, args.batch_size,
                                       num_workers=args.num_workers,
                                       shuffle=True, pin_memory=True)
    Dataset_validation = H5DatasetSia(root=args.dataset_root, mode='validation')
    Dataloader_validation = data.DataLoader(Dataset_validation, batch_size=1,
                                            num_workers=args.num_workers,
                                            shuffle=True, pin_memory=True)

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    def _init_head_from_pretrained(net, weights_path):
        # Load ImageNet weights minus the classifier head, then re-init it.
        state_dict = torch.load(weights_path)
        state_dict.pop('last_linear.bias')
        state_dict.pop('last_linear.weight')
        net.load_state_dict(state_dict, strict=False)
        init.xavier_uniform_(net.last_linear.weight.data)
        net.last_linear.bias.data.zero_()

    # NOTE(review): the original split this chain into two separate `if`
    # statements; merged into one if/elif chain (equivalent dispatch) with an
    # explicit error for unknown basenets instead of a later NameError.
    if args.basenet == 'ResNeXt':
        model = CifarResNeXt(num_classes=17, depth=29, cardinality=8)
        cudnn.benchmark = True
    elif args.basenet == 'ShallowResNeXt':
        model = ShallowResNeXt(num_classes=17, depth=11, cardinality=8)
        cudnn.benchmark = True
    elif args.basenet == 'pnasnet':
        model = pnasnet5large(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'pnasnet5large-bf079911.pth')
    elif args.basenet == 'se_resnet101':
        model = se_resnet101(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'se_resnet101-7e38fcc6.pth')
    elif args.basenet == 'se_resnet50':
        model = se_resnet50(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'se_resnet50_shallow':
        model = se_resnet50_shallow(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'se_resnet50_shallow_sia':
        model = se_resnet50_shallow_sia(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'SimpleNet':
        model = SimpleNet(args.class_num)
        cudnn.benchmark = True
    elif args.basenet == 'SimpleNetLeaky':
        model = SimpleNetLeaky(args.class_num)
        cudnn.benchmark = True
    elif args.basenet == 'se_resnext101_32x4d':
        model = se_resnext101_32x4d(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'se_resnext101_32x4d-3b2fe3d8.pth')
    else:
        raise ValueError('unknown basenet: ' + str(args.basenet))

    model = model.cuda()
    cudnn.benchmark = True

    # Multi-label training uses SoftMarginLoss; single-label uses CE.
    if args.MultiLabel is None:
        criterion = nn.CrossEntropyLoss().cuda()
    else:
        criterion = nn.SoftMarginLoss().cuda()

    Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay, nesterov=True)

    for epoch in range(args.start_epoch, args.epochs):
        # Learning-rate adjustment was intentionally disabled upstream.
        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch,
              Dataloader_validation)
def GeResult():
    """Ensemble three trained classifiers and write merged predictions to CSV.

    Loads ResNeXt, PNASNet and SE-ResNet50 checkpoints, sums their (damped)
    logits per sample and writes ``<id>,<class>`` lines.
    """
    # Priors
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    # Dataset
    Dataset = TiangongResultMerge(root='data')
    Dataloader = data.DataLoader(Dataset, 1, num_workers=1,
                                 shuffle=True, pin_memory=True)

    # Model 1: ResNeXt (called directly, so its input must be on the GPU).
    Network = ResNeXt101_64x4d(6)
    net = torch.nn.DataParallel(Network, device_ids=[0])
    cudnn.benchmark = True
    Network.load_state_dict(
        torch.load('weights/aug_ResNeXt/_Tiangong_SGD_85.pth'))
    Network.eval()

    # Model 2: PNASNet (called through DataParallel, which scatters inputs).
    Network2 = pnasnet5large(6, None)
    net2 = torch.nn.DataParallel(Network2, device_ids=[0])
    cudnn.benchmark = True
    Network2.load_state_dict(
        torch.load('weights/aug_fix1block_pnasnet/_Tiangong_SGD_85.pth'))
    net2.eval()

    # Model 3: SE-ResNet50.
    Network3 = se_resnet50(6, None)
    net3 = torch.nn.DataParallel(Network3, device_ids=[0])
    cudnn.benchmark = True
    Network3.load_state_dict(
        torch.load('weights/aug_se_resnet50/_Tiangong_SGD_95.pth'))
    net3.eval()

    filename = 'Rejection_se_resnet50_pnasnet_resnext.csv'
    # open(..., 'w') truncates, so the old remove/mknod dance is unnecessary;
    # the context manager also guarantees the file is flushed and closed.
    # torch.no_grad(): pure inference, no autograd bookkeeping needed.
    with open(filename, 'w') as f, torch.no_grad():
        for imgs, img2, anos in Dataloader:
            imgs = imgs.cuda()
            pred1 = Network(imgs)
            pred2 = net2(img2)
            pred3 = net3(imgs)

            # Damp low-probability classes: x -> 3x for non-positive logits
            # (x + 2*x where le(x, 0) is 1), pushing unlikely classes further
            # down before the ensemble sum.
            pred1 = pred1 + 2 * torch.mul(pred1, torch.le(pred1, 0).float())
            pred2 = pred2 + 2 * torch.mul(pred2, torch.le(pred2, 0).float())
            pred3 = pred3 + 2 * torch.mul(pred3, torch.le(pred3, 0).float())

            # BUG FIX: the original did `preds.add(pred3)`, which is NOT
            # in-place — its result was discarded, silently dropping the third
            # model from the ensemble. Sum all three explicitly.
            preds = pred1 + pred2 + pred3
            _, pred = preds.data.topk(1, 1, True, True)
            f.write(anos[0] + ',' + CLASSES[pred[0][0]] + '\r\n')
def main():
    """Train a Tiangong classifier (ResNeXt or PNASNet) with SGD, saving per epoch.

    The ResNeXt variant freezes its first two feature stages ("fixblock").
    """
    best_prec1 = 0  # retained for the commented-out validation flow below
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    if args.basenet == 'ResNeXt':
        model = ResNeXt101_64x4d(args.class_num)
        cudnn.benchmark = True
        if args.resume:
            # BUG FIX: the original called Network.load_state_dict here, but
            # `Network` is undefined in this function — the variable is
            # `model`, so resuming always raised NameError.
            model.load_state_dict(torch.load(args.resume))
        else:
            state_dict = torch.load('resnext101_64x4d-e77a0586.pth')
            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            model.load_state_dict(state_dict, strict=False)
            init.xavier_uniform_(model.last_linear.weight.data)
            model.last_linear.bias.data.zero_()
        # Freeze the stem: the first two feature stages stay fixed
        # ("fixblock" training). NOTE(review): original formatting was
        # ambiguous about whether this also applied on resume; applying it
        # unconditionally matches the checkpoint-directory name.
        for p in model.features[0].parameters():
            p.requires_grad = False
        for p in model.features[1].parameters():
            p.requires_grad = False
    elif args.basenet == 'pnasnet':
        model = pnasnet5large(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            state_dict = torch.load('pnasnet5large-bf079911.pth')
            state_dict.pop('last_linear.bias')
            state_dict.pop('last_linear.weight')
            model.load_state_dict(state_dict, strict=False)
            init.xavier_uniform_(model.last_linear.weight.data)
            model.last_linear.bias.data.zero_()

    model = model.cuda()
    cudnn.benchmark = True

    # Dataset
    Dataset_train = Tiangong(root=args.dataset_root, mode='trainval')
    Dataloader_train = data.DataLoader(Dataset_train, args.batch_size,
                                       num_workers=args.num_workers,
                                       shuffle=True, pin_memory=True)
    Dataset_val = Tiangong(root=args.dataset_root, mode='val')
    Dataloader_val = data.DataLoader(Dataset_val, batch_size=1,
                                     num_workers=args.num_workers,
                                     shuffle=True, pin_memory=True)

    criterion = nn.CrossEntropyLoss().cuda()
    # Only optimize parameters that were not frozen above.
    Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(Optimizer, epoch)
        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch)
        # Validation / best-checkpoint tracking was disabled upstream;
        # every epoch is saved unconditionally instead.
        torch.save(
            model.state_dict(),
            'weights/fixblock_Newtrain_' + args.basenet + '/'
            + '_Tiangong_RMSProp_' + repr(epoch) + '.pth')
def main():
    """Train an LCZ42 classifier on Sentinel-2 ('sen2') H5 data with SGD.

    The backbone is chosen via ``args.basenet``; an initial checkpoint is
    saved before training so the output directory is validated early.
    """
    best_prec1 = 0  # retained for parity with the validation flow elsewhere

    # Dataset (Sentinel-2 channels only).
    Dataset_train = H5Dataset(root='data', mode='training', DataType='sen2')
    Dataloader_train = data.DataLoader(Dataset_train, args.batch_size,
                                       num_workers=args.num_workers,
                                       shuffle=True, pin_memory=True)
    Dataset_validation = H5Dataset(root=args.dataset_root, mode='validation',
                                   DataType='sen2')
    Dataloader_validation = data.DataLoader(Dataset_validation, batch_size=1,
                                            num_workers=args.num_workers,
                                            shuffle=True, pin_memory=True)

    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    def _init_head_from_pretrained(net, weights_path):
        # Load ImageNet weights minus the classifier head, then re-init it.
        state_dict = torch.load(weights_path)
        state_dict.pop('last_linear.bias')
        state_dict.pop('last_linear.weight')
        net.load_state_dict(state_dict, strict=False)
        init.xavier_uniform_(net.last_linear.weight.data)
        net.last_linear.bias.data.zero_()

    # NOTE(review): the original used two separate `if` statements at the top
    # of this chain; merged into one if/elif chain (equivalent dispatch) with
    # an explicit error for unknown basenets instead of a later NameError.
    if args.basenet == 'ResNeXt':
        model = Sen2ResNeXt(num_classes=args.class_num, depth=11,
                            cardinality=16)
        cudnn.benchmark = True
    elif args.basenet == 'SimpleNetSen2':
        model = SimpleNetSen2(args.class_num)
        cudnn.benchmark = True
    elif args.basenet == 'se_resnet50':
        model = se_resnet50(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'se_resnet50_shallow':
        model = se_resnet50_shallow(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'nasnetamobile':
        model = nasnetamobile(args.class_num, None)
        cudnn.benchmark = True
    elif args.basenet == 'pnasnet':
        model = pnasnet5large(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'pnasnet5large-bf079911.pth')
    elif args.basenet == 'se_resnet101':
        model = se_resnet101(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'se_resnet101-7e38fcc6.pth')
    elif args.basenet == 'se_resnext101_32x4d':
        model = se_resnext101_32x4d(args.class_num, None)
        cudnn.benchmark = True
        if args.resume:
            model.load_state_dict(torch.load(args.resume))
        else:
            _init_head_from_pretrained(model, 'se_resnext101_32x4d-3b2fe3d8.pth')
    else:
        raise ValueError('unknown basenet: ' + str(args.basenet))

    model = model.cuda()
    cudnn.benchmark = True

    # Class-weighted loss experiments (hard-coded weights / Precision_CM.npy)
    # were disabled upstream; plain cross-entropy is the active choice.
    criterion = nn.CrossEntropyLoss().cuda()

    Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay, nesterov=True)

    # Save an initial checkpoint so a missing/unwritable weights directory
    # fails fast, before any training time is spent.
    torch.save(
        model.state_dict(),
        'weights/lr8e-3_bs8_sen2_' + args.basenet + '/' + 'LCZ42_SGD' + '.pth')

    for epoch in range(args.start_epoch, args.epochs):
        # Learning-rate adjustment was intentionally disabled upstream.
        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch,
              Dataloader_validation)