# Shared imports for the main() variants in this section. Project-local modules
# (MultiModalNet, MultiModalNet1, MultiModalNet2, oct_resnet101, Augmentation,
# MM_BDXJTU2019, BDXJTU2019, train, validate, adjust_learning_rate, log, log_dict)
# and the module-level `args` are assumed to be provided by the surrounding scripts.
import os
import os.path as op
import re
import json

import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import torch.multiprocessing as mp

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # assumed setup; not shown in the originals


def main():
    # create model
    best_prec1 = 0
    if args.basenet == 'se_resnet152':
        model = MultiModalNet('se_resnet152', 'DPN26', 0.5)
        # net = Networktorch.nn.DataParallel(Network, device_ids=[0])
    elif args.basenet == 'multiscale_se_resnext':
        model = MultiModalNet1('multiscale_se_resnext', 'DPN26', 0.5)
    elif args.basenet == 'densenet201':
        model = MultiModalNet2('densenet201', 'DPN26', 0.5)
    elif args.basenet == 'oct_resnet101':
        model = oct_resnet101()

    # Optional warm start from an earlier checkpoint (disabled):
    # print("load pretrained model from /home/dell/Desktop/2019BaiduXJTU/weights/densenet201_se_resnext50_32x4d_resample_pretrained_80w_1/BDXJTU2019_SGD_4.pth")
    # pre = '/home/dell/Desktop/2019BaiduXJTU/weights/densenet201_se_resnext50_32x4d_resample_pretrained_80w_1/BDXJTU2019_SGD_1.pth'
    # model.load_state_dict(torch.load(pre))

    # Multi-GPU training (disabled):
    # if torch.cuda.device_count() > 1:
    #     print("Let's use", torch.cuda.device_count(), "GPUs!")
    #     # dim = 0: [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
    #     model = nn.DataParallel(model)
    model.to(device)

    # Datasets and loaders
    Aug = Augmentation()
    Dataset_train = MM_BDXJTU2019(root='/home/dell/Desktop/2019BaiduXJTU/data', mode='MM_1_train', transform=Aug)
    # weights = [class_ration[label] for data, label in Dataset_train]
    Dataloader_train = data.DataLoader(Dataset_train, 32, num_workers=4, shuffle=True, pin_memory=True)

    Dataset_val = MM_BDXJTU2019(root='/home/dell/Desktop/2019BaiduXJTU/data', mode='val')
    Dataloader_val = data.DataLoader(Dataset_val, batch_size=32, num_workers=4, shuffle=True, pin_memory=True)

    # criterion = nn.CrossEntropyLoss(weight=weights).cuda()
    criterion = nn.CrossEntropyLoss().to(device)
    # Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
    #                       lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    Optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(Optimizer, epoch)

        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch)
        # train(Dataloader_train, Network, criterion, Optimizer, epoch)

        # evaluate on validation set (disabled in this variant)
        # _, _ = validate(Dataloader_val, model, criterion)
        # prec1 = validate(Dataloader_val, Network, criterion)

        # remember best prec@1 and save checkpoint
        # is_best = prec1 > best_prec1
        # best_prec1 = max(prec1, best_prec1)
        # if is_best:
        if epoch % 1 == 0:  # save a checkpoint every epoch
            torch.save(model.state_dict(),
                       'weights/' + args.basenet + '_se_resnext50_32x4d_resample_pretrained_80w_1/' +
                       'BDXJTU2019_SGD_' + repr(epoch) + '.pth')
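# All three main() variants call adjust_learning_rate(Optimizer, epoch), which is
# not shown in this section. A minimal step-decay sketch, assuming the schedule
# divides args.lr by 10 every 10 epochs (decay interval and factor are
# illustrative assumptions, not taken from the original scripts):
def adjust_learning_rate(optimizer, epoch, decay_every=10, gamma=0.1):
    """Step-decay the learning rate of every parameter group."""
    lr = args.lr * (gamma ** (epoch // decay_every))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr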
def main():
    ### Enter main func
    mp.set_start_method('spawn')

    # create model
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    MODEL_NAME = 'multiscale_se_resnext_HR'
    MODEL_DIR = op.join('weights', MODEL_NAME)
    BEST_DIR = op.join('weights', 'best_models')
    if not op.isdir(MODEL_DIR):
        os.mkdir(MODEL_DIR)
    if not op.isdir(BEST_DIR):
        os.mkdir(BEST_DIR)

    # if args.basenet == 'MultiModal':
    model = MultiModalNet(MODEL_NAME, 'DPN26', 0.5)
    # elif args.basenet == 'oct_resnet101':
    #     model = oct_resnet101()
    # net = Networktorch.nn.DataParallel(Network, device_ids=[0])
    model = model.cuda()
    cudnn.benchmark = True

    # Resume from the newest checkpoint in MODEL_DIR, if one exists.
    RESUME = False
    # MODEL_PATH = './weights/best_models/se_resnext50_32x4d_SGD_w_46.pth'
    pthlist = [i for i in os.listdir(MODEL_DIR) if i[-4:] == '.pth']
    if len(pthlist) > 0:
        # sort checkpoints by the trailing epoch number in the filename
        pthlist.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
        MODEL_PATH = op.join(MODEL_DIR, pthlist[-1])
        model.load_state_dict(torch.load(MODEL_PATH))
        RESUME = True

    # Datasets and loaders
    Aug = Augmentation()
    Dataset_train = MM_BDXJTU2019(root=args.dataset_root, mode='train', transform=Aug,
                                  TRAIN_IMAGE_DIR='train_image_raw')
    # weights = [class_ration[label] for data, label in Dataset_train]
    Dataloader_train = data.DataLoader(Dataset_train, args.batch_size,
                                       num_workers=args.num_workers, shuffle=True, pin_memory=True)

    Dataset_val = MM_BDXJTU2019(root=args.dataset_root, mode='val', TRAIN_IMAGE_DIR='train_image_raw')
    Dataloader_val = data.DataLoader(Dataset_val, batch_size=64,
                                     num_workers=args.num_workers, shuffle=True, pin_memory=True)

    # criterion = nn.CrossEntropyLoss(weight=weights).cuda()  # needs the `weights` list above
    criterion = nn.CrossEntropyLoss().cuda()
    Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    args.start_epoch = int(re.findall(r'\d+', MODEL_PATH)[-1]) if RESUME else 0

    # Recover the best top-1 score so far: prefer the JSON best-score log
    # (best_log is not defined in this function; see the sketch after it),
    # otherwise re-evaluate the newest checkpoint in BEST_DIR.
    best_pred1, best_preds = 0, {}
    if RESUME and op.isfile(best_log):
        best_preds = json.load(open(best_log))
        best_pred1 = best_preds['best_pred1']
    elif len(os.listdir(BEST_DIR)) > 0:
        best_model = MultiModalNet(MODEL_NAME, 'DPN26', 0.5).cuda()
        pthlist = [i for i in os.listdir(BEST_DIR) if i[-4:] == '.pth']
        pthlist.sort(key=lambda x: int(re.findall(r'\d+', x)[-1]))
        best_model.load_state_dict(torch.load(op.join(BEST_DIR, pthlist[-1])))
        best_pred1 = validate(Dataloader_val, best_model, criterion, printable=False)[0]
        log_dict(int(re.findall(r'\d+', pthlist[-1])[-1]), best_pred1)

    log('#Resume: Another Start from Epoch {}'.format(args.start_epoch + 1))

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(Optimizer, epoch)

        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch)
        # train(Dataloader_train, Network, criterion, Optimizer, epoch)

        # evaluate on validation set
        pred1, pred5 = validate(Dataloader_val, model, criterion)
        # pred1 = validate(Dataloader_val, Network, criterion)

        # remember best pred@1 and save checkpoints
        COMMON_MODEL_PATH = op.join(MODEL_DIR, 'SGD_fold1_{}.pth'.format(epoch + 1))
        BEST_MODEL_PATH = op.join(BEST_DIR, '{}_SGD_fold1_{}.pth'.format(MODEL_NAME, epoch + 1))
        torch.save(model.state_dict(), COMMON_MODEL_PATH)
        if pred1 > best_pred1:
            best_pred1 = max(pred1, best_pred1)
            torch.save(model.state_dict(), BEST_MODEL_PATH)
            log_dict(epoch + 1, best_pred1)
        log('Epoch:{}\tpred1:{}\tBest_pred1:{}\n'.format(epoch + 1, pred1, best_pred1))
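# The variant above also uses log(), log_dict(), and best_log, none of which are
# defined in this section. A minimal sketch, assuming plain-text and JSON logs
# under the model directory (the file names here are illustrative):
train_log = op.join('weights', 'multiscale_se_resnext_HR', 'train_log.txt')
best_log = op.join('weights', 'multiscale_se_resnext_HR', 'best_log.json')


def log(msg):
    """Echo a message and append it to the text training log."""
    print(msg)
    with open(train_log, 'a') as f:
        f.write(msg + '\n')


def log_dict(epoch, best_pred1):
    """Persist the best top-1 score and the epoch that produced it as JSON."""
    with open(best_log, 'w') as f:
        json.dump({'epoch': epoch, 'best_pred1': best_pred1}, f)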
def main():
    # create model
    best_prec1 = 0
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    torch.cuda.set_device(0)

    if args.basenet == 'MultiModal':
        model = MultiModalNet('se_resnext50_32x4d', 'DPN26', 0.5)
        # net = Networktorch.nn.DataParallel(Network, device_ids=[0])
    elif args.basenet == 'oct_resnet101':
        model = oct_resnet101()
    model = model.cuda()
    cudnn.benchmark = True

    # Datasets and loaders
    Aug = Augmentation()
    Dataset_train = MM_BDXJTU2019(root=args.dataset_root, mode='MM_1_train', transform=Aug)
    # weights = [class_ration[label] for data, label in Dataset_train]
    Dataloader_train = data.DataLoader(Dataset_train, args.batch_size,
                                       num_workers=args.num_workers, shuffle=True, pin_memory=True)

    Dataset_val = BDXJTU2019(root=args.dataset_root, mode='val')
    Dataloader_val = data.DataLoader(Dataset_val, batch_size=8,
                                     num_workers=args.num_workers, shuffle=True, pin_memory=True)

    # criterion = nn.CrossEntropyLoss(weight=weights).cuda()  # needs the `weights` list above
    criterion = nn.CrossEntropyLoss().cuda()
    Optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                          lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(Optimizer, epoch)

        # train for one epoch
        train(Dataloader_train, model, criterion, Optimizer, epoch)
        # train(Dataloader_train, Network, criterion, Optimizer, epoch)

        # evaluate on validation set (disabled in this variant)
        # _, _ = validate(Dataloader_val, model, criterion)
        # prec1 = validate(Dataloader_val, Network, criterion)

        # remember best prec@1 and save checkpoint
        # is_best = prec1 > best_prec1
        # best_prec1 = max(prec1, best_prec1)
        # if is_best:
        if epoch % 1 == 0:  # save a checkpoint every epoch
            torch.save(model.state_dict(),
                       'weights/' + args.basenet + '_se_resnext50_32x4d_resample_pretrained_80w_1/' +
                       'BDXJTU2019_SGD_' + repr(epoch) + '.pth')
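# Every main() above reads hyperparameters from a module-level `args` object that
# this section never builds. A plausible argparse setup whose flag names mirror
# the attributes used above; the defaults are illustrative, not taken from the
# original repo:
import argparse


def parse_args():
    parser = argparse.ArgumentParser(description='BDXJTU2019 training')
    parser.add_argument('--basenet', default='MultiModal', type=str)
    parser.add_argument('--dataset_root', default='data', type=str)
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--num_workers', default=4, type=int)
    parser.add_argument('--lr', default=0.01, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--start_epoch', default=0, type=int)
    parser.add_argument('--epochs', default=60, type=int)
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    main()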