def inference_time_test(args, model):
    """Measure average wall-clock inference time per image on the train split.

    Runs at most 22 batches (steps 0..21) through ``model`` and returns the
    elapsed time divided by the number of processed images.

    Args:
        args: parsed CLI namespace; must provide ``.device`` and ``.train_batch``.
        model: network to benchmark; moved to ``args.device`` and set to eval mode.

    Returns:
        float: average seconds per image.
    """
    dataloader = get_dataloder(args, split_flag='train')
    count = 0
    model = model.to(args.device)
    model.eval()
    start = time.time()
    # FIX: run under no_grad — the original timed forward passes with autograd
    # bookkeeping enabled, inflating both time and memory for pure inference.
    with torch.no_grad():
        for step, (image, target, _) in tqdm.tqdm(enumerate(dataloader)):
            image = image.to(args.device)
            target = target.to(args.device)
            _ = model(image)
            # count in images, assuming every batch is full-sized
            count += args.train_batch
            if step > 20:
                break
    end_time = time.time()
    return (end_time - start) / count
def main(args):
    """Train a segmentation model from ``models_dict`` and checkpoint the best run.

    Builds logger/TensorBoard writer, seeds all RNGs, constructs the data
    loaders, model, loss, optimizer and cosine LR scheduler, optionally
    resumes from ``args.resume``, then runs the train/val loop. Saves
    ``checkpoint.pth.tar`` every epoch and ``model_best.pth.tar`` whenever
    the validation Jaccard index improves.
    """
    ############ init config ################
    model_name = args.model
    assert model_name in models_dict.keys(), "The Usage model is not exist !"
    print('Usage model :{}'.format(model_name))

    #################### init logger ###################################
    log_dir = './logs/' + args.model + '_' + args.note + '/{}'.format(time.strftime('%Y%m%d-%H%M%S'))
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Train'.format(args.model))
    # record the full argument set for reproducibility
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)

    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True

    ####################### init dataset ###########################################
    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    ######################## init model ############################################
    logger.info("Model Dict has keys: \n {}".format(models_dict.keys()))
    model = get_models(args)
    if torch.cuda.device_count() > 1 and args.use_cuda:
        logger.info('use: %d gpus', torch.cuda.device_count())
        model = nn.DataParallel(model)
    logger.info('param size = %fMB', calc_parameters_count(model))

    # init loss: pick criterion by name, default to cross-entropy
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    # init optimizer
    if args.model_optimizer == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                    weight_decay=args.weight_decay, momentum=args.momentum)
    else:
        optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                     [args.beta1, args.beta2], weight_decay=args.weight_decay)
    # cosine annealing over the full training run
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)

    ############################### check resume #########################
    start_epoch = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=args.device)
            # NOTE(review): training restarts at the saved epoch number, so the
            # last completed epoch is re-run — confirm this is intended.
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
        else:
            raise FileNotFoundError("No checkpoint found at '{}'".format(args.resume))

    #################################### train and val ########################
    max_value = 0
    for epoch in range(start_epoch, args.epoch):
        scheduler.step()
        logger.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # train
        mr, ms, mp, mf, mjc, md, macc, mean_loss = train(
            args, model, criterion, train_loader, optimizer, epoch, logger)
        writer.add_scalar('Train/Loss', mean_loss, epoch)
        writer.add_scalar('Train/mAcc', macc, epoch)
        writer.add_scalar('Train/Recall', mr, epoch)
        writer.add_scalar('Train/Specifi', ms, epoch)
        writer.add_scalar('Train/Precision', mp, epoch)
        writer.add_scalar('Train/F1', mf, epoch)
        writer.add_scalar('Train/Jc', mjc, epoch)
        writer.add_scalar('Train/Dice', md, epoch)
        # val
        vmr, vms, vmp, vmf, vmjc, vmd, vmacc, vmean_loss = val(
            args, model, criterion, val_loader, epoch, logger)
        writer.add_scalar('Val/Loss', vmean_loss, epoch)
        writer.add_scalar('Val/mAcc', vmacc, epoch)
        writer.add_scalar('Val/Recall', vmr, epoch)
        writer.add_scalar('Val/Specifi', vms, epoch)
        writer.add_scalar('Val/Precision', vmp, epoch)
        writer.add_scalar('Val/F1', vmf, epoch)
        writer.add_scalar('Val/Jc', vmjc, epoch)
        writer.add_scalar('Val/Dice', vmd, epoch)

        # best model selection on validation Jaccard
        is_best = vmjc >= max_value
        max_value = max(max_value, vmjc)
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # BUG FIX: previously saved model.state_dict() under 'scheduler',
            # which made scheduler.load_state_dict() on resume load garbage.
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(epoch, is_best, max_value))
        # checkpoint every epoch; additionally snapshot the best model
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
        if is_best:
            torch.save(state, os.path.join(args.save_path, "model_best.pth.tar"))
    writer.close()
def main(args):
    """L1-filter prune a trained U-Net, fine-tune it, and evaluate the result.

    Loads a pretrained U-Net checkpoint, measures its baseline performance,
    applies NNI's ``L1FilterPruner`` (50% sparsity on the configured conv
    layers, per 'Pruning Filters for Efficient ConvNets'), fine-tunes for
    ``args.epoch`` epochs, exports the best pruned weights/masks, and finally
    re-loads and re-tests the exported model.
    """
    #################### init logger ###################################
    args.model = 'unet'
    model_weight_path = '../logs/isic2018/unet_ep300/20200402-135108/model_best.pth.tar'
    model = get_models(args)
    model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
    log_dir = './models/' + args.model + '_prune_' + args.note
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-L1Prune'.format(args.model))
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)

    # device + seeding
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)

    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    # init loss: pick criterion by name, default to cross-entropy
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    # baseline before pruning
    logger.info("Original trained model performance test: ")
    infer(args, model, criterion, val_loader, logger)

    # Pruning configuration, as in 'PRUNING FILTERS FOR EFFICIENT CONVNETS'
    configure_list = [{
        'sparsity': 0.5,
        'op_types': ['Conv2d'],
        'op_names': ['Conv1.conv.0', 'Conv1.conv.3', 'Conv2.conv.0', 'Conv2.conv.3',
                     'Conv3.conv.0', 'Conv3.conv.3', 'Conv4.conv.0', 'Conv4.conv.3',
                     'Conv5.conv.0', 'Conv5.conv.3',
                     'Up5.up.1', 'Up_conv5.conv.0', 'Up_conv5.conv.3',
                     'Up4.up.1', 'Up_conv4.conv.0', 'Up_conv4.conv.3',
                     'Up3.up.1', 'Up_conv3.conv.0', 'Up_conv3.conv.3',
                     'Up2.up.1', 'Up_conv2.conv.0', 'Up_conv2.conv.3',
                     ]}
    ]

    # Prune model and test accuracy without fine tuning.
    logger.info('=' * 10 + 'Test on the pruned model before fine tune' + '=' * 10)
    pruner = L1FilterPruner(model, configure_list)
    # compress() wraps forward passes so pruning masks are applied
    model = pruner.compress()
    logger.info("Pruning trained model performance test: ")
    infer(args, model, criterion, val_loader, logger)

    # Fine tune the pruned model and track the best validation Jaccard
    logger.info('=' * 10 + 'Fine tuning' + '=' * 10)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                weight_decay=args.weight_decay, momentum=args.momentum)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)
    max_value = 0
    for epoch in range(0, args.epoch):
        scheduler.step()
        logger.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # let the pruner refresh its masks for this epoch
        pruner.update_epoch(epoch)
        train(args, model, criterion, train_loader, optimizer, epoch, logger)
        vmr, vms, vmp, vmf, vmjc, vmd, vmacc, vloss = infer(args, model, criterion, val_loader, logger)
        writer.add_scalar('Val/Loss', vloss, epoch)
        writer.add_scalar('Val/mAcc', vmacc, epoch)
        writer.add_scalar('Val/Recall', vmr, epoch)
        writer.add_scalar('Val/Specifi', vms, epoch)
        writer.add_scalar('Val/Precision', vmp, epoch)
        writer.add_scalar('Val/F1', vmf, epoch)
        writer.add_scalar('Val/Jc', vmjc, epoch)
        writer.add_scalar('Val/Dice', vmd, epoch)
        is_best = vmjc >= max_value
        max_value = max(max_value, vmjc)
        if is_best:
            # export both the pruned weights and the pruning masks
            pruner.export_model(model_path=os.path.join(args.save_path, "best_prune_unet.pth"),
                                mask_path=os.path.join(args.save_path, 'mask_prune_indexs.pth'))
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # BUG FIX: previously saved model.state_dict() under 'scheduler';
            # a resume from this checkpoint could never restore the scheduler.
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(epoch, is_best, max_value))
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
    writer.close()

    # re-load the exported best pruned model and test it end-to-end
    args.model = 'unet'
    model_weight_path = os.path.join(args.save_path, "best_prune_unet.pth")
    model = get_models(args)
    model.load_state_dict(torch.load(model_weight_path, map_location='cpu'))
    model = model.to(args.device)
    logger.info("Final saved pruned model performance test: ")
    infer(args, model, criterion, val_loader, logger)
def main(args):
    """Train a NAS-UNet variant selected by ``args.model`` with optional deep supervision.

    Looks up the architecture recipe (deep supervision flag, channel-doubling
    flag, genotype name, builder class, layer count) in a table, builds the
    network, then runs the train/val loop, checkpointing every epoch and
    snapshotting ``model_best.pth.tar`` on validation-Jaccard improvement.
    """
    #################### init logger ###################################
    log_dir = './logs/' + '{}'.format(args.dataset) + '/{}_{}_{}'.format(
        args.model, args.note, time.strftime('%Y%m%d-%H%M%S'))
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Train'.format(args.model))
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)

    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True

    ####################### init dataset ###########################################
    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    ############ init model ###########################
    # Each entry: (deepsupervision, double_down_channel, genotype_name, builder, layers)
    # layers=None means "use args.layers". This table replaces a long copy-pasted
    # if/elif chain that built the same keyword set in every branch.
    model_table = {
        "layer7_double_deep": (True, True, 'layer7_double_deep', BuildNasUnetPrune, 7),
        "stage1_double_deep": (True, True, 'stage1_double_deep', BuildNasUnetPrune, None),
        "stage1_nodouble_deep": (True, False, 'stage1_deep', BuildNasUnetPrune, None),
        "stage1_nodouble_deep_slim": (True, False, 'stage1_deep', BuildNasUnetPruneSlim, None),
        "alpha1_stage1_double_deep_ep80": (True, True, 'alpha1_stage1_double_deep_ep80', BuildNasUnetPrune, None),
        "alpha0_stage1_double_deep_ep80": (True, True, 'alpha0_stage1_double_deep_ep80', BuildNasUnetPrune, None),
        # isic trans
        "stage1_layer9_110epoch_double_deep_final": (True, True, 'stage1_layer9_110epoch_double_deep_final', BuildNasUnetPrune, None),
        # just normal cell kept
        "dd_normal": (True, True, 'alpha0_5_stage1_double_deep_ep80', BuildNasUnetPruneNormal, None),
        # normal+down
        "dd_normaldown": (True, True, 'alpha0_5_stage1_double_deep_ep80', BuildNasUnetPruneNormalDown, None),
        # normal+up
        "dd_normalup": (True, True, 'alpha0_5_stage1_double_deep_ep80', BuildNasUnetPruneNormalUp, None),
        # normal+up+down
        "alpha0_5_stage1_double_deep_ep80": (True, True, 'alpha0_5_stage1_double_deep_ep80', BuildNasUnetPrune, None),
        # ablation study of channel doubling and deep supervision
        "alpha0_5_stage1_double_nodeep_ep80": (False, True, 'alpha0_5_stage1_double_nodeep_ep80', BuildNasUnetPrune, None),
        "alpha0_5_stage1_nodouble_deep_ep80": (True, False, 'alpha0_5_stage1_nodouble_deep_ep80', BuildNasUnetPrune, None),
        "alpha0_5_stage1_nodouble_nodeep_ep80": (False, False, 'alpha0_5_stage1_nodouble_nodeep_ep80', BuildNasUnetPrune, None),
    }
    if args.model not in model_table:
        # FIX: the original if/elif chain had no else, so an unknown model name
        # surfaced later as a confusing NameError on `model`.
        raise NotImplementedError("Unknown model '{}'".format(args.model))
    deep, double_down, genotype_name, builder, layers = model_table[args.model]
    args.deepsupervision = deep
    args.double_down_channel = double_down
    args.genotype_name = genotype_name
    # getattr instead of eval(): same lookup on the genotypes module, no eval
    genotype = getattr(genotypes, args.genotype_name)
    model = builder(
        genotype=genotype,
        input_c=args.in_channels,
        c=args.init_channels,
        num_classes=args.nclass,
        meta_node_num=args.middle_nodes,
        layers=args.layers if layers is None else layers,
        dp=args.dropout_prob,
        use_sharing=args.use_sharing,
        double_down_channel=args.double_down_channel,
        aux=args.aux,
    )

    if torch.cuda.device_count() > 1 and args.use_cuda:
        logger.info('use: %d gpus', torch.cuda.device_count())
        model = nn.DataParallel(model)
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    logger.info(genotype)
    logger.info('param size = %fMB', calc_parameters_count(model))

    # init loss: pick criterion by name, default to cross-entropy
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    # init optimizer + cosine LR schedule over the full run
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                weight_decay=args.weight_decay, momentum=args.momentum)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epoch)

    ############################### check resume #########################
    start_epoch = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info("Loading model and optimizer from checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=args.device)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
        else:
            raise FileNotFoundError("No checkpoint found at '{}'".format(args.resume))

    #################################### train and val ########################
    max_value = 0
    for epoch in range(start_epoch, args.epoch):
        scheduler.step()
        logger.info('Epoch: %d lr %e', epoch, scheduler.get_lr()[0])
        # train (deep supervision returns an extra metric tuple for the aux heads)
        if args.deepsupervision:
            mean_loss, value1, value2 = train(args, model, criterion, train_loader, optimizer)
            mr, ms, mp, mf, mjc, md, macc = value1
            mmr, mms, mmp, mmf, mmjc, mmd, mmacc = value2
            logger.info(
                "Epoch:{} Train_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".format(
                    epoch, mean_loss, macc, md, mjc))
            logger.info(" dmAcc:{:.3f} dmDice:{:.3f} dmJc:{:.3f}".format(mmacc, mmd, mmjc))
            writer.add_scalar('Train/dmAcc', mmacc, epoch)
            writer.add_scalar('Train/dRecall', mmr, epoch)
            writer.add_scalar('Train/dSpecifi', mms, epoch)
            writer.add_scalar('Train/dPrecision', mmp, epoch)
            writer.add_scalar('Train/dF1', mmf, epoch)
            writer.add_scalar('Train/dJc', mmjc, epoch)
            writer.add_scalar('Train/dDice', mmd, epoch)
        else:
            mean_loss, value1 = train(args, model, criterion, train_loader, optimizer)
            mr, ms, mp, mf, mjc, md, macc = value1
            logger.info(
                "Epoch:{} Train_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".format(
                    epoch, mean_loss, macc, md, mjc))
        writer.add_scalar('Train/Loss', mean_loss, epoch)
        writer.add_scalar('Train/mAcc', macc, epoch)
        writer.add_scalar('Train/Recall', mr, epoch)
        writer.add_scalar('Train/Specifi', ms, epoch)
        writer.add_scalar('Train/Precision', mp, epoch)
        writer.add_scalar('Train/F1', mf, epoch)
        writer.add_scalar('Train/Jc', mjc, epoch)
        writer.add_scalar('Train/Dice', md, epoch)

        # val
        if args.deepsupervision:
            vmean_loss, valuev1, valuev2 = infer(args, model, criterion, val_loader)
            vmr, vms, vmp, vmf, vmjc, vmd, vmacc = valuev1
            mvmr, mvms, mvmp, mvmf, mvmjc, mvmd, mvmacc = valuev2
            logger.info(
                "Epoch:{} Val_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".format(
                    epoch, vmean_loss, vmacc, vmd, vmjc))
            logger.info(" dmAcc:{:.3f} dmDice:{:.3f} dmJc:{:.3f}".format(mvmacc, mvmd, mvmjc))
            # FIX: the deep-supervision metrics were logged under the plain
            # 'Val/*' tags, colliding with the main-head writes below in the
            # same epoch; use the 'Val/d*' prefix to mirror 'Train/d*'.
            writer.add_scalar('Val/dmAcc', mvmacc, epoch)
            writer.add_scalar('Val/dRecall', mvmr, epoch)
            writer.add_scalar('Val/dSpecifi', mvms, epoch)
            writer.add_scalar('Val/dPrecision', mvmp, epoch)
            writer.add_scalar('Val/dF1', mvmf, epoch)
            writer.add_scalar('Val/dJc', mvmjc, epoch)
            writer.add_scalar('Val/dDice', mvmd, epoch)
        else:
            vmean_loss, valuev1 = infer(args, model, criterion, val_loader)
            vmr, vms, vmp, vmf, vmjc, vmd, vmacc = valuev1
            logger.info(
                "Epoch:{} Val_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".format(
                    epoch, vmean_loss, vmacc, vmd, vmjc))
        is_best = vmjc >= max_value
        max_value = max(max_value, vmjc)
        writer.add_scalar('Val/Loss', vmean_loss, epoch)
        writer.add_scalar('Val/mAcc', vmacc, epoch)
        writer.add_scalar('Val/Recall', vmr, epoch)
        writer.add_scalar('Val/Specifi', vms, epoch)
        writer.add_scalar('Val/Precision', vmp, epoch)
        writer.add_scalar('Val/F1', vmf, epoch)
        writer.add_scalar('Val/Jc', vmjc, epoch)
        writer.add_scalar('Val/Dice', vmd, epoch)
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # BUG FIX: previously saved model.state_dict() under 'scheduler',
            # which made scheduler.load_state_dict() on resume load garbage.
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(epoch, is_best, max_value))
        # checkpoint every epoch; additionally snapshot the best model
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
        if is_best:
            torch.save(state, os.path.join(args.save_path, "model_best.pth.tar"))
    writer.close()
def main(args):
    """Evaluate a trained NAS-UNet checkpoint (CVC dataset) on the validation split.

    Selects the architecture + checkpoint path from ``args.model``, rebuilds
    the network from its genotype, loads the weights, and runs ``infer``,
    saving prediction images under ``<save_path>/images``.
    """
    #################### init logger ###################################
    log_dir = './eval' + '/{}'.format(args.dataset) + '/{}'.format(args.model)
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Eval'.format(args.model))
    # setting
    args.save_path = log_dir
    args.save_images = os.path.join(args.save_path, "images")
    if not os.path.exists(args.save_images):
        # NOTE(review): os.mkdir fails if log_dir itself does not exist yet —
        # presumably get_logger creates it; confirm.
        os.mkdir(args.save_images)
    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True
    ####################### init dataset ###########################################
    val_loader = get_dataloder(args, split_flag="valid")
    ######################## init model ############################################
    # Each branch pins: supervision flags, genotype, builder kwargs, and the
    # exact checkpoint path that was trained for that configuration.
    if args.model == "layer7_double_deep_ep1600_8lr4e-3":
        args.deepsupervision = True
        args.double_down_channel = True
        args.genotype_name = 'layer7_double_deep'
        genotype = eval('genotypes.%s' % args.genotype_name)
        model = BuildNasUnetPrune(genotype=genotype,
                                  input_c=args.in_channels,
                                  c=args.init_channels,
                                  num_classes=args.nclass,
                                  meta_node_num=args.middle_nodes,
                                  layers=9,
                                  dp=args.dropout_prob,
                                  use_sharing=args.use_sharing,
                                  double_down_channel=args.double_down_channel,
                                  aux=args.aux)
        args.model_path = './logs/cvc/layer7_double_deep_ep1600_8lr4e-3/model_best.pth.tar'
        model.load_state_dict(
            torch.load(args.model_path, map_location='cpu')['state_dict'])
    elif args.model == "alpha0_double_deep":
        args.deepsupervision = True
        args.double_down_channel = True
        args.genotype_name = 'alpha0_stage1_double_deep_ep200'
        genotype = eval('genotypes.%s' % args.genotype_name)
        model = BuildNasUnetPrune(genotype=genotype,
                                  input_c=args.in_channels,
                                  c=args.init_channels,
                                  num_classes=args.nclass,
                                  meta_node_num=args.middle_nodes,
                                  layers=args.layers,
                                  dp=args.dropout_prob,
                                  use_sharing=args.use_sharing,
                                  double_down_channel=args.double_down_channel,
                                  aux=args.aux)
        args.model_path = './logs/cvc/alpha0_8lr4e-3/model_best.pth.tar'
        # checkpoint was saved from a DataParallel model: strip 'module.' prefixes
        state_dict = torch.load(args.model_path, map_location='cpu')['state_dict']
        state_dict = remove_module(state_dict)
        model.load_state_dict(state_dict)
    elif args.model == "alpha0_5_double_deep":
        args.deepsupervision = True
        args.double_down_channel = True
        args.genotype_name = 'alpha0_5_stage1_double_deep_ep80'
        genotype = eval('genotypes.%s' % args.genotype_name)
        model = BuildNasUnetPrune(genotype=genotype,
                                  input_c=args.in_channels,
                                  c=args.init_channels,
                                  num_classes=args.nclass,
                                  meta_node_num=args.middle_nodes,
                                  layers=args.layers,
                                  dp=args.dropout_prob,
                                  use_sharing=args.use_sharing,
                                  double_down_channel=args.double_down_channel,
                                  aux=args.aux)
        args.model_path = './logs/cvc/alpha0_5_8lr4e-3/model_best.pth.tar'
        state_dict = torch.load(args.model_path, map_location='cpu')['state_dict']
        state_dict = remove_module(state_dict)
        model.load_state_dict(state_dict)
    elif args.model == "alpha1_double_deep":
        args.deepsupervision = True
        args.double_down_channel = True
        # NOTE(review): this branch reuses the alpha0_5 genotype while loading
        # the alpha1 checkpoint — confirm this pairing is intentional.
        args.genotype_name = 'alpha0_5_stage1_double_deep_ep80'
        genotype = eval('genotypes.%s' % args.genotype_name)
        model = BuildNasUnetPrune(genotype=genotype,
                                  input_c=args.in_channels,
                                  c=args.init_channels,
                                  num_classes=args.nclass,
                                  meta_node_num=args.middle_nodes,
                                  layers=args.layers,
                                  dp=args.dropout_prob,
                                  use_sharing=args.use_sharing,
                                  double_down_channel=args.double_down_channel,
                                  aux=args.aux)
        args.model_path = './logs/cvc/alpha1_8lr4e-3/model_best.pth.tar'
        state_dict = torch.load(args.model_path, map_location='cpu')['state_dict']
        state_dict = remove_module(state_dict)
        model.load_state_dict(state_dict)
    else:
        raise NotImplementedError()
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    logger.info(genotype)
    logger.info('param size = %fMB', calc_parameters_count(model))
    # init loss: pick criterion by name, default to cross-entropy
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)
    # run evaluation, writing per-image predictions to args.save_images
    infer(args, model, criterion, val_loader, logger, args.save_images)
def main(args):
    """Evaluate a list of baseline segmentation models (CVC dataset) in sequence.

    For each name in ``args.model_list``: rebuild the network via
    ``get_models``, load its pretrained checkpoint, then run ``infer`` on the
    validation split, saving predictions under ``<save>/<model_name>/images``.

    NOTE(review): branches mutate shared ``args`` fields (e.g.
    ``args.deepsupervision``, ``args.time_step``) which persist into later
    loop iterations — confirm each listed model tolerates the leftover state.
    """
    #################### init logger ###################################
    args.model_list=["unet","unet++",'attention_unet_v1','multires_unet','r2unet_t3']
    for model_name in args.model_list:
        # select architecture + its trained checkpoint
        if model_name=='unet':
            args.model='unet'
            model_weight_path='./logs/unet_ep1600/cvc/20200312-143050/model_best.pth.tar'
            model=get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name=='unet++':
            args.model='unet++'
            args.deepsupervision=False
            model_weight_path='./logs/unet++_ep1600/cvc/20200312-143358/model_best.pth.tar'
            model=get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'attention_unet_v1':
            args.model = 'attention_unet_v1'
            model_weight_path = './logs/attention_unet_v1_ep1600/cvc/20200312-143413/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'multires_unet':
            args.model = 'multires_unet'
            model_weight_path = './logs/multires_unet_ep1600_t2/20200322-194117/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        # change bn relu order
        elif model_name == 'multires_unet_align':
            args.model = 'multires_unet'
            model_weight_path = './logs/multires_unet_ep1600_chbnrelu/20200327-184457/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'r2unet_t3':
            args.model = 'r2unet'
            # recurrent U-Net with 3 time steps
            args.time_step=3
            model_weight_path = './logs/r2unet_ep1600_t2/20200324-032815/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'unet_ep800dice':
            args.model = 'unet'
            model_weight_path = './logs/unet_ep800_bcedice/cvc/20200315-043021/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name=='unet++_nodeep_ep800dice':
            args.model='unet++'
            args.deepsupervision=False
            model_weight_path='./logs/unet++_ep800_bcedice/cvc/20200315-043214/model_best.pth.tar'
            model=get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'unet++_deep_ep800dice':
            args.model = 'unet++'
            args.deepsupervision = True
            model_weight_path = './logs/unet++_deep_ep800_bcedice/cvc/20200315-043134/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'attention_unet_v1_ep800dice':
            args.model = 'attention_unet_v1'
            args.deepsupervision=False
            model_weight_path = './logs/attention_unet_v1_ep800_bcedice/cvc/20200315-043300/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        elif model_name == 'multires_unet_ep800dice':
            args.model = 'multires_unet'
            args.deepsupervision=False
            model_weight_path = './logs/multires_unet_ep800_bcedice/cvc/20200312-173031/model_best.pth.tar'
            model = get_models(args)
            model.load_state_dict(torch.load(model_weight_path, map_location='cpu')['state_dict'])
        else:
            raise NotImplementedError()
        # per-model output directory under the (pre-existing) args.save root
        # NOTE(review): assert is stripped under `python -O`; an explicit check
        # would be more robust.
        assert os.path.exists(args.save)
        args.model_save_path=os.path.join(args.save,model_name)
        logger = get_logger(args.model_save_path)
        args.save_images= os.path.join(args.model_save_path,"images")
        if not os.path.exists(args.save_images):
            os.mkdir(args.save_images)
        # seeding + device (re-done per model iteration)
        if args.manualSeed is None:
            args.manualSeed = random.randint(1, 10000)
        np.random.seed(args.manualSeed)
        torch.manual_seed(args.manualSeed)
        args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
        args.device = torch.device('cuda' if args.use_cuda else 'cpu')
        if args.use_cuda:
            torch.cuda.manual_seed(args.manualSeed)
            cudnn.benchmark = True
        val_loader = get_dataloder(args, split_flag="valid")
        setting = {k: v for k, v in args._get_kwargs()}
        logger.info(setting)
        logger.info('param size = %fMB', calc_parameters_count(model))
        # init loss: pick criterion by name, default to cross-entropy
        if args.loss == 'bce':
            criterion = nn.BCELoss()
        elif args.loss == 'bcelog':
            criterion = nn.BCEWithLogitsLoss()
        elif args.loss == "dice":
            criterion = DiceLoss()
        elif args.loss == "softdice":
            criterion = SoftDiceLoss()
        elif args.loss == 'bcedice':
            criterion = BCEDiceLoss()
        else:
            criterion = nn.CrossEntropyLoss()
        if args.use_cuda:
            logger.info("load model and criterion to gpu !")
            model = model.to(args.device)
            criterion = criterion.to(args.device)
        # evaluate this model, writing prediction images to args.save_images
        infer(args, model, criterion, val_loader,logger,args.save_images)
def main(args):
    """Evaluate the CHAOS baseline models listed in ``args.model_list``.

    For each model name the matching checkpoint is restored, a validation
    dataloader is built, and ``infer`` writes predictions and logs under
    ``args.save/<model_name>``.
    """
    # model_name -> (args.model value,
    #                deepsupervision flag or None to leave args unchanged,
    #                checkpoint path)
    configs = {
        'unet': ('unet', None,
                 './logs/chaos/unet_ep150_v2/20200403-134703/checkpoint.pth.tar'),
        'unet++': ('unet++', False,
                   './logs/chaos/unet++_ep150_v2/20200403-135401/checkpoint.pth.tar'),
        'attention_unet': ('attention_unet_v1', False,
                           './logs/chaos/attention_unet_v1_ep150_v2/20200403-135445/checkpoint.pth.tar'),
        'multires_unet': ('multires_unet', False,
                          './logs/chaos/multires_unet_ep150_v2/20200403-135549/checkpoint.pth.tar'),
    }
    args.model_list = ['unet', 'unet++', "attention_unet", "multires_unet"]
    for model_name in args.model_list:
        if model_name not in configs:
            raise NotImplementedError()
        model_key, deepsupervision, model_weight_path = configs[model_name]
        args.model = model_key
        if deepsupervision is not None:
            args.deepsupervision = deepsupervision
        model = get_models(args)
        model.load_state_dict(
            torch.load(model_weight_path, map_location='cpu')['state_dict'])

        # Per-model output directories live below args.save.
        assert os.path.exists(args.save)
        args.model_save_path = os.path.join(args.save, model_name)
        logger = get_logger(args.model_save_path)
        args.save_images = os.path.join(args.model_save_path, "images")
        if not os.path.exists(args.save_images):
            os.mkdir(args.save_images)

        # Seed everything for reproducible evaluation.
        if args.manualSeed is None:
            args.manualSeed = random.randint(1, 10000)
        np.random.seed(args.manualSeed)
        torch.manual_seed(args.manualSeed)
        args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
        args.device = torch.device('cuda' if args.use_cuda else 'cpu')
        if args.use_cuda:
            torch.cuda.manual_seed(args.manualSeed)
            cudnn.benchmark = True

        val_loader = get_dataloder(args, split_flag="valid")
        setting = {k: v for k, v in args._get_kwargs()}
        logger.info(setting)
        logger.info('param size = %fMB', calc_parameters_count(model))

        # init loss
        if args.loss == 'bce':
            criterion = nn.BCELoss()
        elif args.loss == 'bcelog':
            criterion = nn.BCEWithLogitsLoss()
        elif args.loss == "dice":
            criterion = DiceLoss()
        elif args.loss == "softdice":
            criterion = SoftDiceLoss()
        elif args.loss == 'bcedice':
            criterion = BCEDiceLoss()
        else:
            criterion = nn.CrossEntropyLoss()

        if args.use_cuda:
            logger.info("load model and criterion to gpu !")
            model = model.to(args.device)
            criterion = criterion.to(args.device)
        infer(args, model, criterion, val_loader, logger, args.save_images)
def main(args):
    """Evaluate the ep300 ISIC2018 baseline models listed in ``args.model_list``.

    Each model's best checkpoint is restored, a validation loader is built,
    and ``infer`` writes predictions and logs under ``args.save/<model_name>``.
    """
    # model_name -> (args.model value,
    #                deepsupervision flag or None to leave args unchanged,
    #                checkpoint path)
    # FIXME(review): 'multires_unet' and 'r2unet' point at the
    # attention_unet_v1 checkpoint directory (note the stray '//') — almost
    # certainly a copy/paste error from the branch above; confirm the real
    # checkpoint paths before trusting their evaluation results.
    configs = {
        'unet': ('unet', None,
                 './logs/isic2018/unet_ep300/20200402-135108/model_best.pth.tar'),
        'unet++_deep': ('unet++', True,
                        './logs/isic2018/unet++_ep300_deep/20200402-135243/model_best.pth.tar'),
        'unet++_nodeep': ('unet++', False,
                          './logs/isic2018/unet++_ep300/20200402-135317/model_best.pth.tar'),
        'attention_unet_v1': ('attention_unet_v1', False,
                              './logs/isic2018/attention_unet_v1_ep300/20200413-160808//model_best.pth.tar'),
        'multires_unet': ('multires_unet', False,
                          './logs/isic2018/attention_unet_v1_ep300/20200413-160808//model_best.pth.tar'),
        'r2unet': ('r2unet', False,
                   './logs/isic2018/attention_unet_v1_ep300/20200413-160808//model_best.pth.tar'),
    }
    args.model_list = [
        "unet", "unet++_deep", 'unet++_nodeep', "attention_unet_v1",
        "multires_unet", "r2unet"
    ]
    for model_name in args.model_list:
        if model_name not in configs:
            raise NotImplementedError()
        model_key, deepsupervision, model_weight_path = configs[model_name]
        args.model = model_key
        if deepsupervision is not None:
            args.deepsupervision = deepsupervision
        model = get_models(args)
        model.load_state_dict(
            torch.load(model_weight_path, map_location='cpu')['state_dict'])

        # Per-model output directories live below args.save.
        assert os.path.exists(args.save)
        args.model_save_path = os.path.join(args.save, model_name)
        logger = get_logger(args.model_save_path)
        args.save_images = os.path.join(args.model_save_path, "images")
        if not os.path.exists(args.save_images):
            os.mkdir(args.save_images)

        # Seed everything for reproducible evaluation.
        if args.manualSeed is None:
            args.manualSeed = random.randint(1, 10000)
        np.random.seed(args.manualSeed)
        torch.manual_seed(args.manualSeed)
        args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
        args.device = torch.device('cuda' if args.use_cuda else 'cpu')
        if args.use_cuda:
            torch.cuda.manual_seed(args.manualSeed)
            cudnn.benchmark = True

        val_loader = get_dataloder(args, split_flag="valid")
        setting = {k: v for k, v in args._get_kwargs()}
        logger.info(setting)
        logger.info('param size = %fMB', calc_parameters_count(model))

        # init loss
        if args.loss == 'bce':
            criterion = nn.BCELoss()
        elif args.loss == 'bcelog':
            criterion = nn.BCEWithLogitsLoss()
        elif args.loss == "dice":
            criterion = DiceLoss()
        elif args.loss == "softdice":
            criterion = SoftDiceLoss()
        elif args.loss == 'bcedice':
            criterion = BCEDiceLoss()
        else:
            criterion = nn.CrossEntropyLoss()

        if args.use_cuda:
            logger.info("load model and criterion to gpu !")
            model = model.to(args.device)
            criterion = criterion.to(args.device)
        infer(args, model, criterion, val_loader, logger, args.save_images)
def main(args):
    """Evaluate pruned NAS-UNet variants and their slimmed re-implementations.

    For every name in ``args.model_list`` the matching genotype and network
    builder are looked up, the checkpoint is restored, and an eval logger
    plus validation loader are prepared.

    NOTE(review): in this version the loop ends after moving model/criterion
    to the device — no ``infer`` call is issued; confirm whether evaluation
    was intentionally disabled here.
    """
    # model_name -> (deepsupervision, double_down_channel, genotype name,
    #                network builder, init channels or None for
    #                args.init_channels, checkpoint path)
    configs = {
        'double_deep': (True, True, 'stage1_layer9_110epoch_double_deep_final',
                        BuildNasUnetPrune, None,
                        './logs/isic2018/prune_20200313-063406_32_32_ep300_double_deep/model_best.pth.tar'),
        'double': (False, True, 'stage1_layer9_110epoch_double_final',
                   BuildNasUnetPrune, None,
                   './logs/isic2018/prune_20200313-063428_32_32_ep300_double/model_best.pth.tar'),
        'nodouble': (False, False, 'stage1_layer9_110epoch_final',
                     BuildNasUnetPrune, None,
                     './logs/isic2018/prune_20200316-141125_nodouble_32_ep300/model_best.pth.tar'),
        'nodouble_deep': (True, False, 'stage1_layer9_110epoch_deep_final',
                          BuildNasUnetPrune, None,
                          './logs/isic2018/prune_20200316-141242_nodouble_32_ep300_deep/model_best.pth.tar'),
        'slim_dd': (True, True, 'stage1_layer9_110epoch_double_deep_final',
                    net_dd, None,
                    './logs/isic2018/dd_20200319-170442_ep300/model_best.pth.tar'),
        'slim_double': (False, True, 'stage1_layer9_110epoch_double_final',
                        net_double, None,
                        './logs/isic2018/double_20200319-170621_ep300/model_best.pth.tar'),
        'slim_nodouble': (False, False, 'stage1_layer9_110epoch_final',
                          net_nodouble, None,
                          './logs/isic2018/nodouble_20200319-210910_ep300/model_best.pth.tar'),
        'slim_nodouble_deep': (True, False, 'stage1_layer9_110epoch_deep_final',
                               net_nodouble_deep, None,
                               './logs/isic2018/nodouble_deep_20200319-210600_ep300/model_best.pth.tar'),
        'slim_nodouble_deep_init32': (True, False, 'stage1_layer9_110epoch_deep_final',
                                      net_nodouble_deep, 32,
                                      './logs/isic2018/nodouble_deep_ep300_init32/model_best.pth.tar'),
    }
    args.model_list = [
        'double_deep', 'double', 'nodouble', 'nodouble_deep', 'slim_dd',
        'slim_double', 'slim_nodouble', 'slim_nodouble_deep'
    ]
    for model_name in args.model_list:
        print(model_name)
        if model_name not in configs:
            # BUG FIX: the original code had two separate if/elif chains with
            # no trailing else, so an unknown name silently reused the model
            # from the previous loop iteration; fail fast instead.
            raise NotImplementedError(model_name)
        (args.deepsupervision, args.double_down_channel, args.genotype_name,
         builder, init_c, args.model_path) = configs[model_name]
        # getattr is the safe equivalent of eval('genotypes.%s' % name).
        genotype = getattr(genotypes, args.genotype_name)
        model = builder(
            genotype=genotype,
            input_c=args.in_channels,
            c=args.init_channels if init_c is None else init_c,
            num_classes=args.nclass,
            meta_node_num=args.middle_nodes,
            layers=args.layers,
            dp=args.dropout_prob,
            use_sharing=args.use_sharing,
            double_down_channel=args.double_down_channel,
            aux=args.aux)
        model.load_state_dict(
            torch.load(args.model_path, map_location='cpu')['state_dict'])

        #################### init logger ###################################
        log_dir = './eval' + '/{}'.format(args.dataset) + '/{}'.format(model_name)
        logger = get_logger(log_dir)
        print('RUNDIR: {}'.format(log_dir))
        logger.info('{}-Eval'.format(model_name))
        args.save_path = log_dir
        args.save_images = os.path.join(args.save_path, "images")
        if not os.path.exists(args.save_images):
            os.mkdir(args.save_images)

        ##################### init device #################################
        if args.manualSeed is None:
            args.manualSeed = random.randint(1, 10000)
        np.random.seed(args.manualSeed)
        torch.manual_seed(args.manualSeed)
        args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
        args.device = torch.device('cuda' if args.use_cuda else 'cpu')
        if args.use_cuda:
            torch.cuda.manual_seed(args.manualSeed)
            cudnn.benchmark = True

        ####################### init dataset ##############################
        val_loader = get_dataloder(args, split_flag="valid")
        setting = {k: v for k, v in args._get_kwargs()}
        logger.info(setting)
        logger.info(genotype)
        logger.info('param size = %fMB', calc_parameters_count(model))

        # init loss
        if args.loss == 'bce':
            criterion = nn.BCELoss()
        elif args.loss == 'bcelog':
            criterion = nn.BCEWithLogitsLoss()
        elif args.loss == "dice":
            criterion = DiceLoss()
        elif args.loss == "softdice":
            criterion = SoftDiceLoss()
        elif args.loss == 'bcedice':
            criterion = BCEDiceLoss()
        else:
            criterion = nn.CrossEntropyLoss()

        if args.use_cuda:
            logger.info("load model and criterion to gpu !")
            model = model.to(args.device)
            criterion = criterion.to(args.device)
def main(args):
    """Train a pruned NAS-UNet variant selected by ``args.model``.

    Builds the genotype-specific network, SGD optimizer and cosine LR
    schedule, optionally resumes from ``args.resume``, then runs the
    train/validate loop, checkpointing every epoch and snapshotting the
    best validation metric to ``model_best.pth.tar``.
    """
    #################### init logger ###################################
    log_dir = './logs/' + '{}'.format(args.dataset) + '/{}_{}_{}'.format(
        args.model, args.note, time.strftime('%Y%m%d-%H%M%S'))
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Train'.format(args.model))
    # setting
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)

    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True

    ####################### init dataset ##############################
    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")

    ######################## init model ###############################
    # args.model -> (deepsupervision, double_down_channel, genotype name,
    #                init channels or None for args.init_channels,
    #                layer count or None for args.layers)
    model_configs = {
        'nodouble_deep_init32_ep100':
            (True, False, 'nodouble_deep_init32_ep100', 32, 9),
        'nodouble_deep_isic':
            (True, False, 'stage1_layer9_110epoch_deep_final', None, None),
        'nodouble_deep_drop02_layer7end':
            (True, False, 'nodouble_deep_drop02_layer7end', None, None),
        'stage1_nodouble_deep_ep36':
            (True, False, 'stage1_nodouble_deep_ep36', None, None),
        'stage1_nodouble_deep_ep63':
            (True, False, 'stage1_nodouble_deep_ep63', None, None),
        'stage1_nodouble_deep_ep83':
            (True, False, 'stage1_nodouble_deep_ep83', None, None),
        'alpha1_stage1_double_deep_ep80':
            (True, True, 'alpha1_stage1_double_deep_ep80', None, None),
        'alpha0_stage1_double_deep_ep80':
            (True, True, 'alpha0_stage1_double_deep_ep80', None, None),
        'alpha0_5_stage1_double_deep_ep80':
            (True, True, 'alpha0_5_stage1_double_deep_ep80', None, None),
        # isic trans
        'stage1_layer9_110epoch_double_deep_final':
            (True, True, 'stage1_layer9_110epoch_double_deep_final', None, None),
        # chaos
        'stage0_double_deep_ep80_newim':
            (True, True, 'stage0_double_deep_ep80_newim', None, None),
        'stage1_double_deep_ep80':
            (True, True, 'stage1_double_deep_ep80', None, None),
        'stage1_double_deep_ep80_ts':
            (True, True, 'stage1_double_deep_ep80_ts', None, None),
        # cvc trans
        'layer7_double_deep':
            (True, True, 'layer7_double_deep', None, None),
    }
    if args.model not in model_configs:
        # Fail fast instead of hitting a NameError on `model` further down.
        raise NotImplementedError(args.model)
    (args.deepsupervision, args.double_down_channel, args.genotype_name,
     init_c, n_layers) = model_configs[args.model]
    # getattr is the safe equivalent of eval('genotypes.%s' % name).
    genotype = getattr(genotypes, args.genotype_name)
    model = BuildNasUnetPrune(
        genotype=genotype,
        input_c=args.in_channels,
        c=args.init_channels if init_c is None else init_c,
        num_classes=args.nclass,
        meta_node_num=args.middle_nodes,
        layers=args.layers if n_layers is None else n_layers,
        dp=args.dropout_prob,
        use_sharing=args.use_sharing,
        double_down_channel=args.double_down_channel,
        aux=args.aux)

    if torch.cuda.device_count() > 1 and args.use_cuda:
        logger.info('use: %d gpus', torch.cuda.device_count())
        model = nn.DataParallel(model)
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    logger.info(genotype)
    logger.info('param size = %fMB', calc_parameters_count(model))

    # init loss
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    elif args.loss == 'multibcedice':
        criterion = MultiClassEntropyDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()

    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)

    # init optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=args.momentum)
    # init schedulers Steplr
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epoch)

    ############################### check resume #########################
    start_epoch = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume, map_location=args.device)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
        else:
            raise FileNotFoundError("No checkpoint found at '{}'".format(
                args.resume))

    #################################### train and val ####################
    max_value = 0
    for epoch in range(start_epoch, args.epoch):
        # NOTE: stepping the scheduler at the top of the epoch reproduces the
        # original LR schedule (pre-1.1.0 PyTorch convention).
        scheduler.step()
        # train
        total_loss = train(args, model, criterion, train_loader, optimizer,
                           epoch, logger)
        writer.add_scalar('Train/total_loss', total_loss, epoch)
        # val
        tloss, md = val(args, model, criterion, val_loader, epoch, logger)
        writer.add_scalar('Val/total_loss', tloss, epoch)
        is_best = md >= max_value
        max_value = max(max_value, md)
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # BUG FIX: this key previously stored model.state_dict(), which
            # made scheduler restoration on --resume load garbage.
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(
            epoch, is_best, max_value))
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
        if is_best:
            torch.save(state,
                       os.path.join(args.save_path, "model_best.pth.tar"))
    writer.close()
def main(args):
    """Measure average inference time for one pruned NAS-UNet variant.

    Overrides a few args fields with small fixed values (batch 2, 256x256
    crops on isic2018), builds the network selected by ``args.model``, and
    reports the timing returned by ``test_time``.
    """
    args.device = torch.device('cuda')
    args.dataset = 'isic2018'
    args.train_batch = 2
    args.val_batch = 2
    args.num_workers = 2
    args.crop_size = 256
    args.base_size = 256
    train_loader = get_dataloder(args, split_flag="train")

    # args.model -> (deepsupervision, double_down_channel, genotype name)
    configs = {
        'dd': (True, True, 'stage1_layer9_110epoch_double_deep_final'),
        'double': (False, True, 'stage1_layer9_110epoch_double_final'),
        'nodouble': (False, False, 'stage1_layer9_110epoch_final'),
        'nodouble_deep': (True, False, 'stage1_layer9_110epoch_deep_final'),
    }
    if args.model not in configs:
        raise NotImplementedError()
    (args.deepsupervision, args.double_down_channel,
     args.genotype_name) = configs[args.model]
    # getattr is the safe equivalent of eval('genotypes.%s' % name).
    genotype = getattr(genotypes, args.genotype_name)
    model = BuildNasUnetPrune(
        genotype=genotype,
        input_c=args.in_channels,
        c=args.init_channels,
        num_classes=args.nclass,
        meta_node_num=args.middle_nodes,
        layers=args.layers,
        dp=args.dropout_prob,
        use_sharing=args.use_sharing,
        double_down_channel=args.double_down_channel,
        aux=args.aux)
    # BUG FIX: print() was given a printf-style '%fMB' placeholder that was
    # never interpolated; format the value explicitly.
    print('param size = {:f}MB'.format(calc_parameters_count(model)))

    # BUG FIX: the result was previously bound to `time`, shadowing the
    # `time` module for the rest of the function; also fixed the "Infrence"
    # typo in the report line.
    infer_time = test_time(args, model, train_loader)
    print("Inference time:{}".format(infer_time))
# Model keys that load network alphas from a search checkpoint, mapped to
# (deepsupervision, double_down_channel, genotype name, alphas checkpoint path).
_ALPHA_MODEL_CONFIGS = {
    'alpha_double_deep': (
        True, True, 'stage1_layer9_110epoch_double_deep_final',
        './search_exp/Nas_Search_Unet/isic2018/deepsupervision/stage_1_model/checkpoint.pth.tar'),
    'alpha_double': (
        False, True, 'stage1_layer9_110epoch_double_final',
        './search_exp/Nas_Search_Unet/isic2018/nodeepsupervision/stage_1_model/checkpoint.pth.tar'),
    'alpha_nodouble': (
        False, False, 'stage1_layer9_110epoch_final',
        './search_exp/Nas_Search_Unet/isic2018/nodouble/stage_1_model/checkpoint.pth.tar'),
    'alpha_nodouble_deep': (
        True, False, 'stage1_layer9_110epoch_deep_final',
        './search_exp/Nas_Search_Unet/isic2018/nodouble_deep/stage_1_model/checkpoint.pth.tar'),
}

# Pruned-model keys mapped to (deepsupervision, double_down_channel, genotype name).
_PRUNE_MODEL_CONFIGS = {
    'double_deep': (True, True, 'stage1_layer9_110epoch_double_deep_final'),
    'double': (False, True, 'stage1_layer9_110epoch_double_final'),
    'nodouble': (False, False, 'stage1_layer9_110epoch_final'),
    'nodouble_deep': (True, False, 'stage1_layer9_110epoch_deep_final'),
    'alpha1_stage1_double_deep_ep80': (True, True, 'alpha1_stage1_double_deep_ep80'),
    'alpha0_stage1_double_deep_ep80': (True, True, 'alpha0_stage1_double_deep_ep80'),
    'alpha0_5_stage1_double_deep_ep80': (True, True, 'alpha0_5_stage1_double_deep_ep80'),
    'alpha0_5_stage1_double_nodeep_ep80': (False, True, 'alpha0_5_stage1_double_nodeep_ep80'),
    'alpha0_5_stage1_nodouble_deep_ep80': (True, False, 'alpha0_5_stage1_nodouble_deep_ep80'),
    'alpha0_5_stage1_nodouble_nodeep_ep80': (False, False, 'alpha0_5_stage1_nodouble_nodeep_ep80'),
    # cvc trans
    'layer7_double_deep': (True, True, 'layer7_double_deep'),
    # chaos trans
    'stage0_double_deep_ep80_newim': (True, True, 'stage0_double_deep_ep80_newim'),
}


def _build_train_model(args):
    """Build the network selected by ``args.model``.

    Returns ``(model_alphas, genotype, model)``.  ``model_alphas`` is the
    softmaxed network-alphas tensor loaded from the search checkpoint for the
    ``alpha_*`` variants, and ``None`` for the pruned variants.

    Raises:
        NotImplementedError: if ``args.model`` is not a known variant
            (the original fell through with ``model`` undefined).
    """
    if args.model in _ALPHA_MODEL_CONFIGS:
        (args.deepsupervision, args.double_down_channel,
         args.genotype_name, args.alphas_model) = _ALPHA_MODEL_CONFIGS[args.model]
        model_alphas = torch.load(
            args.alphas_model,
            map_location=args.device)['alphas_dict']['alphas_network']
        model_alphas.requires_grad = False
        model_alphas = F.softmax(model_alphas, dim=-1)
        builder = BuildNasUnet
    elif args.model in _PRUNE_MODEL_CONFIGS:
        (args.deepsupervision, args.double_down_channel,
         args.genotype_name) = _PRUNE_MODEL_CONFIGS[args.model]
        model_alphas = None
        builder = BuildNasUnetPrune
    else:
        raise NotImplementedError(
            "unknown model '{}'".format(args.model))
    # getattr replaces eval('genotypes.%s' % name): same lookup, safer.
    genotype = getattr(genotypes, args.genotype_name)
    model = builder(
        genotype=genotype,
        input_c=args.in_channels,
        c=args.init_channels,
        num_classes=args.nclass,
        meta_node_num=args.middle_nodes,
        layers=args.layers,
        dp=args.dropout_prob,
        use_sharing=args.use_sharing,
        double_down_channel=args.double_down_channel,
        aux=args.aux)
    return model_alphas, genotype, model


def main(args):
    """Train a (pruned) NAS-UNet variant and validate each epoch.

    Sets up logging and TensorBoard, seeds RNGs, builds dataloaders and the
    network selected by ``args.model``, chooses the loss from ``args.loss``,
    optionally resumes from ``args.resume``, then runs the train/validation
    loop, checkpointing every epoch and tracking the best validation Jaccard.
    """
    #################### init logger ###################################
    log_dir = './logs/' + '{}'.format(args.dataset) + '/{}_{}_{}'.format(
        args.model, time.strftime('%Y%m%d-%H%M%S'), args.note)
    logger = get_logger(log_dir)
    print('RUNDIR: {}'.format(log_dir))
    logger.info('{}-Train'.format(args.model))
    # setting
    args.save_path = log_dir
    args.save_tbx_log = args.save_path + '/tbx_log'
    writer = SummaryWriter(args.save_tbx_log)
    ##################### init device #################################
    if args.manualSeed is None:
        args.manualSeed = random.randint(1, 10000)
    np.random.seed(args.manualSeed)
    torch.manual_seed(args.manualSeed)
    args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
    args.device = torch.device('cuda' if args.use_cuda else 'cpu')
    if args.use_cuda:
        torch.cuda.manual_seed(args.manualSeed)
        cudnn.benchmark = True
    ####################### init dataset ##############################
    train_loader = get_dataloder(args, split_flag="train")
    val_loader = get_dataloder(args, split_flag="valid")
    ######################## init model ###############################
    model_alphas, genotype, model = _build_train_model(args)
    if torch.cuda.device_count() > 1 and args.use_cuda:
        logger.info('use: %d gpus', torch.cuda.device_count())
        model = nn.DataParallel(model)
    setting = {k: v for k, v in args._get_kwargs()}
    logger.info(setting)
    logger.info(genotype)
    logger.info(model_alphas)
    flop, param = get_model_complexity_info(model, (3, 256, 256),
                                            as_strings=True,
                                            print_per_layer_stat=False)
    print("GFLOPs: {}".format(flop))
    print("Params: {}".format(param))
    # init loss
    if args.loss == 'bce':
        criterion = nn.BCELoss()
    elif args.loss == 'bcelog':
        criterion = nn.BCEWithLogitsLoss()
    elif args.loss == "dice":
        criterion = DiceLoss()
    elif args.loss == "softdice":
        criterion = SoftDiceLoss()
    elif args.loss == 'bcedice':
        criterion = BCEDiceLoss()
    else:
        criterion = nn.CrossEntropyLoss()
    if args.use_cuda:
        logger.info("load model and criterion to gpu !")
        model = model.to(args.device)
        criterion = criterion.to(args.device)
    # init optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=args.momentum)
    # init schedulers Steplr
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.epoch)
    #scheduler=torch.optim.lr_scheduler.StepLR(optimizer=optimizer,step_size=30,gamma=0.1,last_epoch=-1)
    ############################### check resume ######################
    start_epoch = 0
    if args.resume is not None:
        if os.path.isfile(args.resume):
            logger.info(
                "Loading model and optimizer from checkpoint '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume, map_location=args.device)
            start_epoch = checkpoint['epoch']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['state_dict'])
            scheduler.load_state_dict(checkpoint['scheduler'])
        else:
            raise FileNotFoundError("No checkpoint found at '{}'".format(
                args.resume))
    #################################### train and val ################
    max_value = 0
    for epoch in range(start_epoch, args.epoch):
        # NOTE(review): stepping the scheduler before training preserves the
        # original LR schedule; PyTorch >=1.1 recommends stepping after
        # optimizer.step() — confirm before reordering.
        scheduler.step()
        # train
        if args.deepsupervision:
            mean_loss, value1, value2 = train(args, model_alphas, model,
                                              criterion, train_loader,
                                              optimizer)
        else:
            mean_loss, value1 = train(args, model_alphas, model, criterion,
                                      train_loader, optimizer)
        mr, ms, mp, mf, mjc, md, macc = value1
        logger.info(
            "Epoch:{} Train_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".
            format(epoch, mean_loss, macc, md, mjc))
        if args.deepsupervision:
            # bug fix: the original logged the undefined name `mmd`
            # (NameError); `md` is the mean Dice computed above.
            writer.add_scalar('Train/dDice', md, epoch)
        # write
        writer.add_scalar('Train/Loss', mean_loss, epoch)
        # val
        if args.deepsupervision:
            vmean_loss, valuev1, valuev2 = infer(args, model_alphas, model,
                                                 criterion, val_loader)
        else:
            vmean_loss, valuev1 = infer(args, model_alphas, model, criterion,
                                        val_loader)
        vmr, vms, vmp, vmf, vmjc, vmd, vmacc = valuev1
        logger.info(
            "Epoch:{} Val_Loss:{:.3f} Acc:{:.3f} Dice:{:.3f} Jc:{:.3f}".
            format(epoch, vmean_loss, vmacc, vmd, vmjc))
        # best model = highest validation Jaccard so far
        is_best = vmjc >= max_value
        max_value = max(max_value, vmjc)
        writer.add_scalar('Val/Loss', vmean_loss, epoch)
        state = {
            'epoch': epoch,
            'optimizer': optimizer.state_dict(),
            'state_dict': model.state_dict(),
            # bug fix: the original stored model.state_dict() under
            # 'scheduler', which broke scheduler.load_state_dict() on resume.
            'scheduler': scheduler.state_dict(),
        }
        logger.info("epoch:{} best:{} max_value:{}".format(
            epoch, is_best, max_value))
        # always keep the rolling checkpoint; additionally snapshot the best
        torch.save(state, os.path.join(args.save_path, "checkpoint.pth.tar"))
        if is_best:
            torch.save(state,
                       os.path.join(args.save_path, "model_best.pth.tar"))
    writer.close()
# Eval model keys mapped to (deepsupervision, double_down_channel,
# genotype name, trained-weights checkpoint path).
_EVAL_MODEL_CONFIGS = {
    'alpha0_double_deep_0.01': (
        True, True, 'alpha0_stage1_double_deep_ep200',
        './logs/isic2018/alpha0_double_deep_0.01/model_best.pth.tar'),
    'alpha0_5_double_deep_0.01': (
        True, True, 'alpha0_5_stage1_double_deep_ep80',
        './logs/isic2018/alpha0_5_double_deep_0.01/model_best.pth.tar'),
    'alpha1_double_deep_0.01': (
        True, True, 'alpha1_stage1_double_deep_ep200',
        './logs/isic2018/alpha1_double_deep_0.01/model_best.pth.tar'),
}


def main(args):
    """Evaluate trained NAS-UNet variants on the validation split.

    For each model key in the hard-coded ``args.model_list``: rebuild the
    network from its genotype, load its best checkpoint, set up a per-model
    eval log directory, seed RNGs, and run ``infer`` on the validation
    loader, saving prediction images under ``<log_dir>/images``.

    Raises:
        NotImplementedError: if a list entry is not a known variant
            (the original fell through with ``model``/``genotype`` undefined).
    """
    #args.model_list=['alpha0_double_deep_0.01','alpha0_5_double_deep_0.01','alpha1_double_deep_0.01','nodouble_deep','slim_dd','slim_double','slim_nodouble','slim_nodouble_deep']
    #args.model_list=["double_deep","nodouble_deep","slim_nodouble"]
    #args.model_list=["slim_nodouble_deep_init32"]
    #args.model_list=["slim_nodouble_deep_init48"]
    args.model_list = [
        'alpha0_double_deep_0.01', 'alpha0_5_double_deep_0.01',
        'alpha1_double_deep_0.01'
    ]
    for model_name in args.model_list:
        # The three branches differed only in genotype and checkpoint path;
        # look them up instead of duplicating the builder call per branch.
        if model_name not in _EVAL_MODEL_CONFIGS:
            raise NotImplementedError(
                "unknown model '{}'".format(model_name))
        (args.deepsupervision, args.double_down_channel,
         args.genotype_name, args.model_path) = _EVAL_MODEL_CONFIGS[model_name]
        # getattr replaces eval('genotypes.%s' % name): same lookup, safer.
        genotype = getattr(genotypes, args.genotype_name)
        model = BuildNasUnetPrune(
            genotype=genotype,
            input_c=args.in_channels,
            c=args.init_channels,
            num_classes=args.nclass,
            meta_node_num=args.middle_nodes,
            layers=args.layers,
            dp=args.dropout_prob,
            use_sharing=args.use_sharing,
            double_down_channel=args.double_down_channel,
            aux=args.aux)
        # Checkpoints were saved under nn.DataParallel; strip the 'module.'
        # prefixes before loading into the bare model.
        state_dict = torch.load(args.model_path,
                                map_location='cpu')['state_dict']
        state_dict = remove_module(state_dict)
        model.load_state_dict(state_dict)
        #################### init logger ###############################
        log_dir = './eval' + '/{}'.format(
            args.dataset) + '/{}'.format(model_name)
        ##################### init model ###############################
        logger = get_logger(log_dir)
        print('RUNDIR: {}'.format(log_dir))
        logger.info('{}-Eval'.format(model_name))
        # setting
        args.save_path = log_dir
        args.save_images = os.path.join(args.save_path, "images")
        # bug fix: os.mkdir fails when intermediate directories are missing;
        # makedirs with exist_ok also removes the racy exists() pre-check.
        os.makedirs(args.save_images, exist_ok=True)
        ##################### init device ##############################
        if args.manualSeed is None:
            args.manualSeed = random.randint(1, 10000)
        np.random.seed(args.manualSeed)
        torch.manual_seed(args.manualSeed)
        args.use_cuda = args.gpus > 0 and torch.cuda.is_available()
        args.device = torch.device('cuda' if args.use_cuda else 'cpu')
        if args.use_cuda:
            torch.cuda.manual_seed(args.manualSeed)
            cudnn.benchmark = True
        ####################### init dataset ###########################
        # sorted vaild datasets
        val_loader = get_dataloder(args, split_flag="valid")
        setting = {k: v for k, v in args._get_kwargs()}
        logger.info(setting)
        logger.info(genotype)
        logger.info('param size = %fMB', calc_parameters_count(model))
        # init loss
        if args.loss == 'bce':
            criterion = nn.BCELoss()
        elif args.loss == 'bcelog':
            criterion = nn.BCEWithLogitsLoss()
        elif args.loss == "dice":
            criterion = DiceLoss()
        elif args.loss == "softdice":
            criterion = SoftDiceLoss()
        elif args.loss == 'bcedice':
            criterion = BCEDiceLoss()
        else:
            criterion = nn.CrossEntropyLoss()
        if args.use_cuda:
            logger.info("load model and criterion to gpu !")
            model = model.to(args.device)
            criterion = criterion.to(args.device)
        infer(args, model, criterion, val_loader, logger, args.save_images)