Code Example #1
def get_model(model_path, model_type):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet11', 'UNet16', 'AlbuNet34'
    :return:
    """

    num_classes = 1

    if model_type == 'UNet11':
        model = UNet11(num_classes=num_classes)
    elif model_type == 'UNet16':
        model = UNet16(num_classes=num_classes)
    elif model_type == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes)
    elif model_type == 'UNet':
        model = UNet(num_classes=num_classes)
    else:
        model = UNet(num_classes=num_classes)

    state = torch.load(str(model_path))
    state = {
        key.replace('module.', ''): value
        for key, value in state['model'].items()
    }
    model.load_state_dict(state)

    model.eval()  # switch to inference mode on both the CPU and the GPU path

    if torch.cuda.is_available():
        return model.cuda()

    return model
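
The key.replace('module.', '') step above undoes the prefix that nn.DataParallel adds to every parameter name when a wrapped model is saved. A minimal, self-contained sketch (standard PyTorch only) of that renaming:

import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3))
wrapped = nn.DataParallel(net)

state = wrapped.state_dict()
print(next(iter(state)))                     # 'module.0.weight'

clean = {k.replace('module.', ''): v for k, v in state.items()}
net.load_state_dict(clean)                   # keys now fit the unwrapped model
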
Code Example #2
def get_model(model_path, model_type):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet11', 'UNet16', 'AlbuNet34'
    :return:
    """

    num_classes = 1

    if model_type == 'UNet11':
        model = UNet11(num_classes=num_classes)
    elif model_type == 'UNet16':
        model = UNet16(num_classes=num_classes)
    elif model_type == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes)
    elif model_type == 'MDeNet':
        print('Mine MDeNet..................')
        model = MDeNet(num_classes=num_classes)
    elif model_type == 'EncDec':
        print('Mine EncDec..................')
        model = EncDec(num_classes=num_classes)
    elif model_type == 'hourglass':
        model = hourglass(num_classes=num_classes)
    elif model_type == 'MDeNetplus':
        print('load MDeNetplus..................')
        model = MDeNetplus(num_classes=num_classes)
    elif model_type == 'UNet':
        model = UNet(num_classes=num_classes)
    else:
        print('I am here')
        model = UNet(num_classes=num_classes)

    state = torch.load(str(model_path))
    state = {
        key.replace('module.', ''): value
        for key, value in state['model'].items()
    }
    model.load_state_dict(state)

    model.eval()  # switch to inference mode on both the CPU and the GPU path

    if torch.cuda.is_available():
        return model.cuda()

    return model
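
A hedged usage sketch for the loader above; the checkpoint path is hypothetical, and the 3-channel 512x512 input is an assumption about what these encoders expect:

import torch

model = get_model('runs/debug/model_0.pt', model_type='UNet16')  # hypothetical checkpoint
with torch.no_grad():
    image = torch.randn(1, 3, 512, 512)
    if torch.cuda.is_available():
        image = image.cuda()
    mask_prob = torch.sigmoid(model(image))  # per-pixel probabilities (num_classes=1)
print(mask_prob.shape)
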
Code Example #3
def unlabel_prediction(PATH_model, unlabel_name_file):
    device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

    num_classes = 1
    model = UNet11(num_classes=num_classes)

    model.to(device)
    model.load_state_dict(torch.load(PATH_model, map_location=device))
    model.eval()
    ######################### setting all data paths#######
    outfile_path = 'predictions_VHR/unlabel_test/'
    data_path = 'data_VHR'
    test_path = "data_VHR/unlabel/" + unlabel_name_file

    get_files_path = test_path + "/*.npy"
    test_file_names = np.array(sorted(glob.glob(get_files_path)))
    ###################################

    test_transform = DualCompose([CenterCrop(512), ImageOnly(Normalize())])

    test_loader = make_loader(test_file_names,
                              shuffle=False,
                              transform=test_transform)
    metrics = defaultdict(float)

    count_img = 0
    input_vec = []
    pred_vec = []
    for inputs, name in test_loader:
        inputs = inputs.to(device)
        with torch.set_grad_enabled(False):
            input_vec.append(inputs.data.cpu().numpy())
            pred = model(inputs)
            pred = torch.sigmoid(pred)

            pred_vec.append(pred.data.cpu().numpy())
            count_img += 1
    print(count_img)
    name_imgs = outfile_path + unlabel_name_file + "_inputs_unlab_" + str(
        count_img) + ".npy"
    name_preds = outfile_path + unlabel_name_file + "_pred_unlab_" + str(
        count_img) + ".npy"

    np.save(name_imgs, np.array(input_vec))
    np.save(name_preds, np.array(pred_vec))
    return name_imgs, name_preds
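
A hedged follow-up sketch for reading back the arrays that unlabel_prediction writes; the file names below are hypothetical instances of the pattern built above, and if the stacked batches differ in shape np.load would additionally need allow_pickle=True:

import numpy as np

inputs = np.load('predictions_VHR/unlabel_test/test_seq_inputs_unlab_10.npy')  # hypothetical name
preds = np.load('predictions_VHR/unlabel_test/test_seq_pred_unlab_10.npy')     # hypothetical name
print(inputs.shape, preds.shape)
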
Code Example #4
File: generate_masks.py  Project: BenGab/irob_surg
def get_model(model_path, model_type='UNet11', problem_type='binary'):
    """

    :param model_path:
    :param model_type: 'UNet', 'UNet16', 'UNet11', 'LinkNet34', 'AlbuNet'
    :param problem_type: 'binary', 'parts', 'instruments'
    :return:
    """
    if problem_type == 'binary':
        num_classes = 1
    elif problem_type == 'parts':
        num_classes = 4
    elif problem_type == 'instruments':
        num_classes = 8

    if model_type == 'UNet16':
        model = UNet16(num_classes=num_classes)
    elif model_type == 'UNet11':
        model = UNet11(num_classes=num_classes)
    elif model_type == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes)
    elif model_type == 'AlbuNet':
        model = AlbuNet(num_classes=num_classes)
    elif model_type == 'UNet':
        model = UNet(num_classes=num_classes)

    state = None
    if torch.cuda.is_available():
        state = torch.load(str(model_path))
    else:
        state = torch.load(str(model_path), map_location='cpu')

    state = {
        key.replace('module.', ''): value
        for key, value in state['model'].items()
    }
    model.load_state_dict(state)

    model.eval()  # switch to inference mode on both the CPU and the GPU path

    if torch.cuda.is_available():
        return model.cuda()

    return model
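
The CPU/GPU branch above can be collapsed with map_location; a minimal sketch of that variant using only the standard torch API (the checkpoint path is hypothetical):

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state = torch.load('model_1.pt', map_location=device)  # hypothetical checkpoint path
weights = {k.replace('module.', ''): v for k, v in state['model'].items()}
# model.load_state_dict(weights); model.to(device) would then finish the job
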
Code Example #5
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold-out', type=int, help='fold train test', default=0)
    arg('--fold-in', type=int, help='fold train val', default=0)
    arg('--percent', type=float, help='percent of data', default=1)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=4)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=40)
    arg('--n-steps', type=int, default=200)
    arg('--lr', type=float, default=0.003) 
    arg('--modelVHR', type=str, default='UNet11', choices=['UNet11','UNet','AlbuNet34','SegNet'])
    arg('--dataset-path-HR', type=str, default='data_HR', help='main path of the HR dataset')
    arg('--model-path-HR', type=str, default='logs_HR/mapping/model_40epoch_HR_UNet11.pth', help='path of the HR model')
    arg('--dataset-path-VHR', type=str, default='data_VHR', help='main path of the VHR dataset')
    arg('--name-file-HR', type=str, default='_HR', help='name of the HR dataset file')
    arg('--dataset-file', type=str, default='VHR', help='main dataset resolution; the crop size depends on it')
    arg('--out-file', type=str, default='seq', help='the folder in which to save the outputs')
    arg('--train-val-file-HR', type=str, default='train_val_HR', help='name of the train-val file')
    arg('--test-file-HR', type=str, default='test_HR', help='name of the test file')
    arg('--train-val-file-VHR', type=str, default='train_val_850', help='name of the train-val file')
    arg('--test-file-VHR', type=str, default='test_850', help='name of the test file')
    
    args = parser.parse_args()
    
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1 
    input_channels=4

    if args.modelVHR == 'UNet11':
        model_VHR = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.modelVHR == 'UNet':
        model_VHR = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.modelVHR == 'AlbuNet34':
        model_VHR =AlbuNet34(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.modelVHR == 'SegNet':
        model_VHR = SegNet(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    else:
        model_VHR = UNet11(num_classes=num_classes, input_channels=4)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model_VHR = nn.DataParallel(model_VHR, device_ids=device_ids).cuda()

    cudnn.benchmark = True


    out_path = Path(('logs_{}/mapping/').format(args.out_file))
    
    #Data-paths:--------------------------VHr-------------------------------------
    data_path_VHR = Path(args.dataset_path_VHR) 
    print("data_path:",data_path_VHR)
  

    name_file_VHR = '_'+ str(int(args.percent*100))+'_percent_'+args.out_file
    data_all='data'
    ##--------------------------------------
 
   ############################  
    # Nested cross validation K-fold train test
    ##train_val_file_names, test_file_names_HR = get_split_out(data_path_HR,data_all,args.fold_out)
    ############################  
   ############################  Cross validation
    train_val_file_names=np.array(sorted(glob.glob(str((data_path_VHR/args.train_val_file_VHR/'images'))+ "/*.npy")))
    test_file_names_VHR =  np.array(sorted(glob.glob(str((data_path_VHR/args.test_file_VHR/'images')) + "/*.npy")))
    
    if args.percent !=1:
        extra, train_val_file_names= percent_split(train_val_file_names, args.percent) 

    train_file_VHR_lab,val_file_VHR_lab = get_split_in(train_val_file_names,args.fold_in)
    np.save(str(os.path.join(out_path,"train_files{}_{}_fold{}_{}.npy".format(name_file_VHR, args.modelVHR, args.fold_out, args.fold_in))), train_file_VHR_lab)
    np.save(str(os.path.join(out_path,"val_files{}_{}_fold{}_{}.npy". format(name_file_VHR, args.modelVHR, args.fold_out, args.fold_in))), val_file_VHR_lab)

      #Data-paths:--------------------------unlabeled VHR-------------------------------------    
    
    train_path_VHR_unlab= data_path_VHR/'unlabel'/'train'/'images'
    val_path_VHR_unlab = data_path_VHR/'unlabel'/'val'/'images'
    
    

    train_file_VHR_unlab = np.array(sorted(list(train_path_VHR_unlab.glob('*.npy'))))
    val_file_VHR_unlab = np.array(sorted(list(val_path_VHR_unlab.glob('*.npy'))))
   
    print('num train_lab = {}, num_val_lab = {}'.format(len(train_file_VHR_lab), len(val_file_VHR_lab)))
    print('num train_unlab = {}, num_val_unlab = {}'.format(len(train_file_VHR_unlab), len(val_file_VHR_unlab)))
    
    max_values_VHR, mean_values_VHR, std_values_VHR=meanstd(train_file_VHR_lab, val_file_VHR_lab,test_file_names_VHR,str(data_path_VHR),input_channels)

    def make_loader(file_names, shuffle=False, transform=None,mode='train',batch_size=4, limit=None):
        return DataLoader(
            dataset=WaterDataset(file_names, transform=transform,mode=mode, limit=limit),
            shuffle=shuffle,            
            batch_size=batch_size, 
            pin_memory=torch.cuda.is_available() 

        )
 #transformations ---------------------------------------------------------------------------      
        
    train_transform_VHR = DualCompose([
            CenterCrop(512),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize(mean=mean_values_VHR,std= std_values_VHR))
        ])
    
    val_transform_VHR = DualCompose([
            CenterCrop(512),
            ImageOnly(Normalize(mean=mean_values_VHR, std=std_values_VHR))
        ])
#-------------------------------------------------------------------      
    mean_values_HR=(0.11952524, 0.1264638 , 0.13479991, 0.15017026)
    std_values_HR=(0.08844988, 0.07304429, 0.06740904, 0.11003125)
    
    train_transform_VHR_unlab = DualCompose([
            CenterCrop(512),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize(mean=mean_values_HR,std= std_values_HR))
        ])
    
    val_transform_VHR_unlab = DualCompose([
            CenterCrop(512),
            ImageOnly(Normalize(mean=mean_values_HR, std=std_values_HR))
        ])
    

######################## DATA-LOADERS ###########################################################
    train_loader_VHR_lab = make_loader(train_file_VHR_lab, shuffle=True, transform=train_transform_VHR , batch_size = 2, mode = "train")
    valid_loader_VHR_lab = make_loader(val_file_VHR_lab, transform=val_transform_VHR, batch_size = 4, mode = "train")
    
    train_loader_VHR_unlab = make_loader(train_file_VHR_unlab, shuffle=True, transform=train_transform_VHR, batch_size = 4, mode = "unlb_train")
    valid_loader_VHR_unlab = make_loader(val_file_VHR_unlab, transform=val_transform_VHR, batch_size = 2, mode = "unlb_val")

    
    dataloaders_VHR_lab= {
        'train': train_loader_VHR_lab, 'val': valid_loader_VHR_lab
    }
    
    dataloaders_VHR_unlab= {
        'train': train_loader_VHR_unlab, 'val': valid_loader_VHR_unlab
    }

#----------------------------------------------    
    root.joinpath(('params_{}.json').format(args.out_file)).write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))
    
    # Observe that all parameters are being optimized
    optimizer_ft = optim.Adam(model_VHR.parameters(), lr= args.lr)  
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1) 

#--------------------------model HR-------------------------------------
    PATH_HR= args.model_path_HR

    #Initialise the model
    model_HR = UNet11(num_classes=num_classes)
    model_HR.cuda()
    model_HR.load_state_dict(torch.load(PATH_HR))
#---------------------------------------------------------------
    model_VHR= utilsTrain_seq.train_model(
        out_file=args.out_file,
        name_file_VHR=name_file_VHR,
        model_HR=model_HR, 
        model_VHR=model_VHR,
        optimizer=optimizer_ft,
        scheduler=exp_lr_scheduler,
        dataloaders_VHR_lab=dataloaders_VHR_lab,
        dataloaders_VHR_unlab=dataloaders_VHR_unlab,
        fold_out=args.fold_out,
        fold_in=args.fold_in,
        name_model_VHR=args.modelVHR,
        n_steps=args.n_steps,
        num_epochs=args.n_epochs 
        
        )


    torch.save(model_VHR.module.state_dict(),
               (str(out_path) + '/model{}_{}_foldout{}_foldin{}_{}epochs.pth').format(
                   name_file_VHR, args.modelVHR, args.fold_out, args.fold_in, args.n_epochs))

    print(args.modelVHR)
    max_values_all_VHR=3521

    find_metrics(train_file_names=train_file_VHR_lab, 
                 val_file_names=val_file_VHR_lab,
                 test_file_names=test_file_names_VHR, 
                 max_values=max_values_all_VHR, 
                 mean_values=mean_values_VHR, 
                 std_values=std_values_VHR, 
                 model=model_VHR, 
                 fold_out=args.fold_out, 
                 fold_in=args.fold_in,
                 name_model=args.modelVHR,
                 epochs=args.n_epochs, 
                 out_file=args.out_file, 
                 dataset_file=args.dataset_file,
                 name_file=name_file_VHR)
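
A minimal sketch, standard PyTorch only, of the learning-rate schedule configured above (StepLR with step_size=20, gamma=0.1 over 40 epochs); the tiny linear model stands in for the real network:

from torch import nn, optim
from torch.optim import lr_scheduler

net = nn.Linear(4, 1)
optimizer = optim.Adam(net.parameters(), lr=0.003)
scheduler = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

for epoch in range(40):
    optimizer.step()      # the real training iterations would go here
    scheduler.step()
print(optimizer.param_groups[0]['lr'])  # ~3e-05 after two x0.1 decays
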
Code Example #6
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'UNet16', 'AlbuNet34'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    loss = LossBinary(jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        return DataLoader(dataset=AngyodysplasiaDataset(file_names,
                                                        transform=transform,
                                                        limit=limit),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    train_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    val_transform = DualCompose([
        SquarePaddingTraining(),
        CenterCrop([574, 574]),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               limit=args.limit)
    valid_loader = make_loader(val_file_names, transform=val_transform)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=validation_binary,
                fold=args.fold)
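
The init_optimizer argument above is a factory rather than an optimizer instance; a hedged sketch of that pattern, assuming utils.train calls it to build (and possibly rebuild) the optimizer at a given learning rate:

import torch.nn as nn
from torch.optim import Adam

net = nn.Linear(4, 1)
init_optimizer = lambda lr: Adam(net.parameters(), lr=lr)

optimizer = init_optimizer(1e-4)  # built with the starting lr
optimizer = init_optimizer(1e-5)  # and rebuilt later with a smaller one
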
Code Example #7
import os
import glob
import torch
from tqdm import tqdm
import torch.nn as nn
from torch.utils.data import DataLoader
from models import UNet11
import torch.optim as optim
from dataset import PersonDataset
import torchvision.models as models

lr = 0.001
batch_size = 30
num_epochs = 20

generator = UNet11()
generator.load_state_dict(torch.load('generator_early_trained.pth'))
generator.cuda()

discriminator = torch.load('discriminator_early_trained.pth')
discriminator.cuda()

persontraindataset = PersonDataset('../datasets/celeb_dataset',
                                   mode='train',
                                   transforms=transforms)
persontraindataloader = DataLoader(persontraindataset,
                                   batch_size=batch_size,
                                   shuffle=True)

personvaldataset = PersonDataset('../datasets/celeb_dataset',
                                 mode='test',
                                 transforms=None)
Code Example #8
        scheduler.step(val_bce)
        net.train()

        print('Validation Dice Coeff: {}, bce: {}'.format(val_dice, val_bce))

        if cp and epoch_num % 5 == 0:
            torch.save(
                net.state_dict(), CHECKPOINT_DIR +
                'linknet_{}_loss{}.pth'.format(epoch_num + 1, loss.item()))

            print('Checkpoint {} saved !'.format(epoch_num + 1))


if __name__ == '__main__':
    print(NUM_CLASSES)
    net = UNet11(NUM_CLASSES).cuda().double()
    # net = LinkNet34(NUM_CLASSES).cuda()
    cudnn.benchmark = True

    # if os.path.exists(RESTORE_INTERRUPTED) and RESTORE_INTERRUPTED is not None:
    #     net.load_state_dict(torch.load(RESTORE_INTERRUPTED))
    #     print('Model loaded from {}'.format('interrupted.pth'))
    try:
        train_net(net, EPOCH_NUM, BATCH_SIZE, LEARNING_RATE, gpu=True)
    except KeyboardInterrupt:
        torch.save(net.state_dict(), RESTORE_INTERRUPTED)
        print('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
Code Example #9
def get_model(path):
    model = UNet11().cuda()
    model.load_state_dict(torch.load(path))
    model = model.eval()
    return model
Code Example #10
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=1, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=10)
    arg('--lr', type=float, default=0.0002)
    arg('--workers', type=int, default=10)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='DLinkNet',
        choices=['UNet', 'UNet11', 'LinkNet34', 'DLinkNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'DLinkNet':
        model = D_LinkNet34(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        # loss = LossBinary(jaccard_weight=args.jaccard_weight)
        loss = LossBCE_DICE()
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary'):
        return DataLoader(dataset=RoboticsDataset(file_names,
                                                  transform=transform,
                                                  problem_type=problem_type),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=args.batch_size,
                          pin_memory=torch.cuda.is_available())

    # train_file_names, val_file_names = get_split(args.fold)
    train_file_names, val_file_names = get_train_val_files()

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    train_transform = DualCompose(
        [HorizontalFlip(),
         VerticalFlip(),
         ImageOnly(Normalize())])

    val_transform = DualCompose([ImageOnly(Normalize())])

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform,
                               problem_type=args.type)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform,
                               problem_type=args.type)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
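
The script swaps LossBinary for LossBCE_DICE, whose implementation is not shown here; below is only a hedged sketch of a typical BCE-plus-Dice objective for single-channel logits, which the project's actual class may or may not match:

import torch
import torch.nn as nn

class BCEDiceLoss(nn.Module):
    """Sum of binary cross-entropy (on logits) and soft Dice loss."""

    def __init__(self, smooth=1.0):
        super().__init__()
        self.bce = nn.BCEWithLogitsLoss()
        self.smooth = smooth

    def forward(self, logits, targets):
        bce = self.bce(logits, targets)
        probs = torch.sigmoid(logits)
        intersection = (probs * targets).sum()
        dice = (2.0 * intersection + self.smooth) / (probs.sum() + targets.sum() + self.smooth)
        return bce + (1.0 - dice)
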
Code Example #11
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.3, type=float)
    arg('--device-ids', type=str, default='0', help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=12)
    arg('--model', type=str, default='UNet', choices=['UNet', 'UNet11', 'LinkNet34', 'UNet16', 'AlbuNet34', 'MDeNet', 'EncDec', 'hourglass', 'MDeNetplus'])

    args = parser.parse_args()
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNet':
        print('Mine MDeNet..................')
        model = MDeNet(num_classes=num_classes, pretrained=True)
    elif args.model == 'MDeNetplus':
        print('load MDeNetplus..................')
        model = MDeNetplus(num_classes=num_classes, pretrained=True)
    elif args.model == 'EncDec':
        print('Mine EncDec..................')
        model = EncDec(num_classes=num_classes, pretrained=True)
    elif args.model == 'GAN':
        model = GAN(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, pretrained=False)
    elif args.model == 'hourglass':
        model = hourglass(num_classes=num_classes, pretrained=True) 
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model).cuda()   #  nn.DataParallel(model, device_ids=device_ids).cuda()
    
    cudnn.benchmark = True
    
    def make_loader(file_names, shuffle=False, transform=None, limit=None):
        return DataLoader(
            dataset=Polyp(file_names, transform=transform, limit=limit),
            shuffle=shuffle,
            num_workers=args.workers,
            batch_size=args.batch_size,
            pin_memory=torch.cuda.is_available()
        )

    train_file_names, val_file_names = get_split(args.fold)

    print('num train = {}, num_val = {}'.format(len(train_file_names), len(val_file_names)))
    
    train_transform = DualCompose([
        CropCVC612(),
        img_resize(512),
        HorizontalFlip(),
        VerticalFlip(),
        Rotate(),
        Rescale(), 
        Zoomin(),
        ImageOnly(RandomHueSaturationValue()),
        ImageOnly(Normalize())
    ])

    train_loader = make_loader(train_file_names, shuffle=True, transform=train_transform, limit=args.limit)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    utils.train(
        args=args,
        model=model,
        train_loader=train_loader,
        fold=args.fold
    )
Code Example #12
    arg('--test-file', type=str, default='test_512', help='name of the test file test_512 or test_160' )



    args = parser.parse_args()    
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 3
    channels=list(map(int, args.channels.split(','))) #5
    input_channels=len(channels)
    print('channels:',channels,'len',input_channels)
    
    
    if args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'UNet':
        model = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.model == 'SegNet':
        model = SegNet(num_classes=num_classes, num_input_channels=input_channels, pretrained=False)
    elif args.model == 'DeepLabV3':
        model = deeplabv3_resnet101(pretrained=False, progress=True, num_classes=num_classes)
        #model = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True, num_classes=num_classes)
    elif args.model == 'FCN':
        model = fcn_resnet101(pretrained=False, progress=True, num_classes=num_classes)
    else:
        model = UNet11(num_classes=num_classes, input_channels=input_channels)
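
The deeplabv3_resnet101 and fcn_resnet101 builders above appear to be the torchvision ones (the commented-out line hints at models.segmentation); a short sketch under that assumption, noting that these models return a dict of outputs:

import torch
from torchvision.models.segmentation import deeplabv3_resnet101

seg_model = deeplabv3_resnet101(pretrained=False, progress=True, num_classes=3)
seg_model.eval()
with torch.no_grad():
    out = seg_model(torch.randn(1, 3, 224, 224))['out']  # logits live under the 'out' key
print(out.shape)  # torch.Size([1, 3, 224, 224])
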

    
Code Example #13
persontraindataset = PersonDataset('../datasets/celeb_dataset',
                                   mode='train',
                                   transforms=transforms)
persontraindataloader = DataLoader(persontraindataset,
                                   batch_size=batch_size,
                                   shuffle=True)

personvaldataset = PersonDataset('../datasets/celeb_dataset',
                                 mode='test',
                                 transforms=None)
personvaldataloader = DataLoader(personvaldataset,
                                 batch_size=batch_size,
                                 shuffle=True)

generator = UNet11(pretrained='vgg')

generator.cuda()

image_loss = nn.MSELoss()

optim_generator = optim.SGD(generator.parameters(), lr=lr, momentum=0.9)
losses_generator_train = []
losses_generator_val = []

for epoch in range(num_epochs):
    loss_batch_train = 0.0
    loss_batch_val = 0.0
    generator.train()
    print('\n Epoch:{}'.format(epoch + 1))
    for i, (correct_img, degraded_img, _,
Code Example #14
File: train_tiramisu.py  Project: Two222/ISIC2018
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', type=float, default=1)
    arg('--root', type=str, default='runs/debug', help='checkpoint root')
    arg('--image-path', type=str, default='data', help='image path')
    arg('--batch-size', type=int, default=2)
    arg('--n-epochs', type=int, default=100)
    arg('--optimizer', type=str, default='Adam', help='Adam or SGD')
    arg('--lr', type=float, default=0.001)
    arg('--workers', type=int, default=10)
    arg('--model',
        type=str,
        default='UNet16',
        choices=[
            'UNet', 'UNet11', 'UNet16', 'LinkNet34', 'FCDenseNet57',
            'FCDenseNet67', 'FCDenseNet103'
        ])
    arg('--model-weight', type=str, default=None)
    arg('--resume-path', type=str, default=None)
    arg('--attribute',
        type=str,
        default='all',
        choices=[
            'pigment_network', 'negative_network', 'streaks',
            'milia_like_cyst', 'globules', 'all'
        ])
    args = parser.parse_args()

    ## folder for checkpoint
    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    image_path = args.image_path

    #print(args)
    if args.attribute == 'all':
        num_classes = 5
    else:
        num_classes = 1
    args.num_classes = num_classes
    ### save initial parameters
    print('--' * 10)
    print(args)
    print('--' * 10)
    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    ## load pretrained model
    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained='vgg')
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'FCDenseNet103':
        model = FCDenseNet103(num_classes=num_classes)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    ## multiple GPUs
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.to(device)

    ## load pretrained model
    if args.model_weight is not None:
        state = torch.load(args.model_weight)
        #epoch = state['epoch']
        #step = state['step']
        model.load_state_dict(state['model'])
        print('--' * 10)
        print('Load pretrained model', args.model_weight)
        #print('Restored model, epoch {}, step {:,}'.format(epoch, step))
        print('--' * 10)
        ## replace the last layer
        ## although the model and the pre-trained weights have different sizes (the last layer is different)
        ## pytorch can still load the weight
        ## I found that the weight for one layer just duplicated for all layers
        ## therefore, the following code is not necessary
        # if args.attribute == 'all':
        #     model = list(model.children())[0]
        #     num_filters = 32
        #     model.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)
        #     print('--' * 10)
        #     print('Load pretrained model and replace the last layer', args.model_weight, num_classes)
        #     print('--' * 10)
        #     if torch.cuda.device_count() > 1:
        #         model = nn.DataParallel(model)
        #     model.to(device)

    ## model summary
    print_model_summay(model)

    ## define loss
    loss_fn = LossBinary(jaccard_weight=args.jaccard_weight)

    ## It enables benchmark mode in cudnn.
    ## benchmark mode is good whenever your input sizes for your network do not vary. This way, cudnn will look for the
    ## optimal set of algorithms for that particular configuration (which takes some time). This usually leads to faster runtime.
    ## But if your input sizes changes at each iteration, then cudnn will benchmark every time a new size appears,
    ## possibly leading to worse runtime performances.
    cudnn.benchmark = True

    ## get train_test_id
    train_test_id = get_split()

    ## train vs. val
    print('--' * 10)
    print('num train = {}, num_val = {}'.format(
        (train_test_id['Split'] == 'train').sum(),
        (train_test_id['Split'] != 'train').sum()))
    print('--' * 10)

    train_transform = DualCompose(
        [HorizontalFlip(),
         VerticalFlip(),
         ImageOnly(Normalize())])

    val_transform = DualCompose([ImageOnly(Normalize())])

    ## define data loader
    train_loader = make_loader(train_test_id,
                               image_path,
                               args,
                               train=True,
                               shuffle=True,
                               transform=train_transform)
    valid_loader = make_loader(train_test_id,
                               image_path,
                               args,
                               train=False,
                               shuffle=True,
                               transform=val_transform)

    if True:
        print('--' * 10)
        print('check data')
        train_image, train_mask, train_mask_ind = next(iter(train_loader))
        print('train_image.shape', train_image.shape)
        print('train_mask.shape', train_mask.shape)
        print('train_mask_ind.shape', train_mask_ind.shape)
        print('train_image.min', train_image.min().item())
        print('train_image.max', train_image.max().item())
        print('train_mask.min', train_mask.min().item())
        print('train_mask.max', train_mask.max().item())
        print('train_mask_ind.min', train_mask_ind.min().item())
        print('train_mask_ind.max', train_mask_ind.max().item())
        print('--' * 10)

    valid_fn = validation_binary

    ###########
    ## optimizer
    if args.optimizer == 'Adam':
        optimizer = Adam(model.parameters(), lr=args.lr)
    elif args.optimizer == 'SGD':
        optimizer = SGD(model.parameters(), lr=args.lr, momentum=0.9)

    ## loss
    criterion = loss_fn
    ## change LR
    scheduler = ReduceLROnPlateau(optimizer,
                                  'min',
                                  factor=0.8,
                                  patience=5,
                                  verbose=True)

    ##########
    ## load previous model status
    previous_valid_loss = 10
    model_path = root / 'model.pt'
    if args.resume_path is not None and model_path.exists():
        state = torch.load(str(model_path))
        epoch = state['epoch']
        step = state['step']
        model.load_state_dict(state['model'])
        epoch = 1
        step = 0
        try:
            previous_valid_loss = state['valid_loss']
        except KeyError:
            previous_valid_loss = 10
        print('--' * 10)
        print('Restored previous model, epoch {}, step {:,}'.format(
            epoch, step))
        print('--' * 10)
    else:
        epoch = 1
        step = 0

    #########
    ## start training
    log = root.joinpath('train.log').open('at', encoding='utf8')
    writer = SummaryWriter()
    meter = AllInOneMeter()
    #if previous_valid_loss = 10000
    print('Start training')
    print_model_summay(model)
    previous_valid_jaccard = 0
    for epoch in range(epoch, args.n_epochs + 1):
        model.train()
        random.seed()
        #jaccard = []
        start_time = time.time()
        meter.reset()
        w1 = 1.0
        w2 = 0.5
        w3 = 0.5
        try:
            train_loss = 0
            valid_loss = 0
            # if epoch == 1:
            #     freeze_layer_names = get_freeze_layer_names(part='encoder')
            #     set_freeze_layers(model, freeze_layer_names=freeze_layer_names)
            #     #set_train_layers(model, train_layer_names=['module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            # elif epoch == 5:
            #     w1 = 1.0
            #     w2 = 0.0
            #     w3 = 0.5
            #     freeze_layer_names = get_freeze_layer_names(part='encoder')
            #     set_freeze_layers(model, freeze_layer_names=freeze_layer_names)
            #     # set_train_layers(model, train_layer_names=['module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            #elif epoch == 3:
            #     set_train_layers(model, train_layer_names=['module.dec5.block.0.conv.weight','module.dec5.block.0.conv.bias',
            #                                                'module.dec5.block.1.weight','module.dec5.block.1.bias',
            #                                                'module.dec4.block.0.conv.weight','module.dec4.block.0.conv.bias',
            #                                                'module.dec4.block.1.weight','module.dec4.block.1.bias',
            #                                                'module.dec3.block.0.conv.weight','module.dec3.block.0.conv.bias',
            #                                                'module.dec3.block.1.weight','module.dec3.block.1.bias',
            #                                                'module.dec2.block.0.conv.weight','module.dec2.block.0.conv.bias',
            #                                                'module.dec2.block.1.weight','module.dec2.block.1.bias',
            #                                                'module.dec1.conv.weight','module.dec1.conv.bias',
            #                                                'module.final.weight','module.final.bias'])
            #     print_model_summay(model)
            # elif epoch == 50:
            #     set_freeze_layers(model, freeze_layer_names=None)
            #     print_model_summay(model)
            for i, (train_image, train_mask,
                    train_mask_ind) in enumerate(train_loader):
                # inputs, targets = variable(inputs), variable(targets)

                train_image = train_image.permute(0, 3, 1, 2)
                train_mask = train_mask.permute(0, 3, 1, 2)
                train_image = train_image.to(device)
                train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)
                train_mask_ind = train_mask_ind.to(device).type(
                    torch.cuda.FloatTensor)
                # if args.problem_type == 'binary':
                #     train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)
                # else:
                #     #train_mask = train_mask.to(device).type(torch.cuda.LongTensor)
                #     train_mask = train_mask.to(device).type(torch.cuda.FloatTensor)

                outputs, outputs_mask_ind1, outputs_mask_ind2 = model(
                    train_image)
                #print(outputs.size())
                #print(outputs_mask_ind1.size())
                #print(outputs_mask_ind2.size())
                ### note that the last layer in the model is defined differently
                # if args.problem_type == 'binary':
                #     train_prob = F.sigmoid(outputs)
                #     loss = criterion(outputs, train_mask)
                # else:
                #     #train_prob = outputs
                #     train_prob = F.sigmoid(outputs)
                #     loss = torch.tensor(0).type(train_mask.type())
                #     for feat_inx in range(train_mask.shape[1]):
                #         loss += criterion(outputs, train_mask)
                train_prob = torch.sigmoid(outputs)
                train_mask_ind_prob1 = torch.sigmoid(outputs_mask_ind1)
                train_mask_ind_prob2 = torch.sigmoid(outputs_mask_ind2)
                loss1 = criterion(outputs, train_mask)
                #loss1 = F.binary_cross_entropy_with_logits(outputs, train_mask)
                #loss2 = nn.BCEWithLogitsLoss()(outputs_mask_ind1, train_mask_ind)
                #print(train_mask_ind.size())
                #weight = torch.ones_like(train_mask_ind)
                #weight[:, 0] = weight[:, 0] * 1
                #weight[:, 1] = weight[:, 1] * 14
                #weight[:, 2] = weight[:, 2] * 14
                #weight[:, 3] = weight[:, 3] * 4
                #weight[:, 4] = weight[:, 4] * 4
                #weight = weight * train_mask_ind + 1
                #weight = weight.to(device).type(torch.cuda.FloatTensor)
                loss2 = F.binary_cross_entropy_with_logits(
                    outputs_mask_ind1, train_mask_ind)
                loss3 = F.binary_cross_entropy_with_logits(
                    outputs_mask_ind2, train_mask_ind)
                #loss3 = criterion(outputs_mask_ind2, train_mask_ind)
                loss = loss1 * w1 + loss2 * w2 + loss3 * w3
                #print(loss1.item(), loss2.item(), loss.item())
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                step += 1
                #jaccard += [get_jaccard(train_mask, (train_prob > 0).float()).item()]
                meter.add(train_prob, train_mask, train_mask_ind_prob1,
                          train_mask_ind_prob2, train_mask_ind, loss1.item(),
                          loss2.item(), loss3.item(), loss.item())
                # print(train_mask.data.shape)
                # print(train_mask.data.sum(dim=-2).shape)
                # print(train_mask.data.sum(dim=-2).sum(dim=-1).shape)
                # print(train_mask.data.sum(dim=-2).sum(dim=-1).sum(dim=0).shape)
                # intersection = train_mask.data.sum(dim=-2).sum(dim=-1)
                # print(intersection.shape)
                # print(intersection.dtype)
                # print(train_mask.data.shape[0])
                #torch.zeros([2, 4], dtype=torch.float32)
            #########################
            ## at the end of each epoch, evaluate the metrics
            epoch_time = time.time() - start_time
            train_metrics = meter.value()
            train_metrics['epoch_time'] = epoch_time
            train_metrics['image'] = train_image.data
            train_metrics['mask'] = train_mask.data
            train_metrics['prob'] = train_prob.data

            #train_jaccard = np.mean(jaccard)
            #train_auc = str(round(mtr1.value()[0],2))+' '+str(round(mtr2.value()[0],2))+' '+str(round(mtr3.value()[0],2))+' '+str(round(mtr4.value()[0],2))+' '+str(round(mtr5.value()[0],2))
            valid_metrics = valid_fn(model, criterion, valid_loader, device,
                                     num_classes)
            ##############
            ## write events
            write_event(log,
                        step,
                        epoch=epoch,
                        train_metrics=train_metrics,
                        valid_metrics=valid_metrics)
            #save_weights(model, model_path, epoch + 1, step)
            #########################
            ## tensorboard
            write_tensorboard(writer,
                              model,
                              epoch,
                              train_metrics=train_metrics,
                              valid_metrics=valid_metrics)
            #########################
            ## save the best model
            valid_loss = valid_metrics['loss1']
            valid_jaccard = valid_metrics['jaccard']
            if valid_loss < previous_valid_loss:
                save_weights(model, model_path, epoch + 1, step, train_metrics,
                             valid_metrics)
                previous_valid_loss = valid_loss
                print('Save best model by loss')
            if valid_jaccard > previous_valid_jaccard:
                save_weights(model, model_path, epoch + 1, step, train_metrics,
                             valid_metrics)
                previous_valid_jaccard = valid_jaccard
                print('Save best model by jaccard')
            #########################
            ## change learning rate
            scheduler.step(valid_metrics['loss1'])

        except KeyboardInterrupt:
            # print('--' * 10)
            # print('Ctrl+C, saving snapshot')
            # save_weights(model, model_path, epoch, step)
            # print('done.')
            # print('--' * 10)
            writer.close()
            #return
    writer.close()
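
A minimal sketch, standard PyTorch only, of the ReduceLROnPlateau policy configured above: the learning rate is multiplied by factor once the monitored validation loss has failed to improve for patience epochs (toy numbers, not the project's loop):

import torch.nn as nn
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau

net = nn.Linear(4, 1)
optimizer = Adam(net.parameters(), lr=1e-3)
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.8, patience=5)

for epoch in range(10):
    valid_loss = 1.0          # pretend the validation loss has plateaued
    scheduler.step(valid_loss)
print(optimizer.param_groups[0]['lr'])  # 0.0008: reduced once the patience ran out
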
Code Example #15
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold-out', type=int, default=0, help='fold train-val test')
    arg('--fold-in', type=int, default=0, help='fold train val')
    arg('--percent', type=float, default=1, help='percent of data')
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=4, help='HR:4,VHR:8')
    arg('--limit', type=int, default=10000, help='number of images in epoch')
    arg('--n-epochs', type=int, default=40)
    arg('--lr', type=float, default=1e-3)
    arg('--model',
        type=str,
        default='UNet11',
        choices=['UNet11', 'UNet', 'AlbuNet34', 'SegNet'])
    arg('--dataset-path',
        type=str,
        default='data_VHR',
        help='main file,in which the dataset is:  data_VHR or data_HR')
    arg('--dataset-file',
        type=str,
        default='VHR',
        help='resolution of the dataset VHR,HR')
    #arg('--out-file', type=str, default='VHR', help='the file in which save the outputs')
    arg('--train-val-file',
        type=str,
        default='train_val_850',
        help='name of the train-val file VHR:train_val_850 or train_val_HR')
    arg('--test-file',
        type=str,
        default='test_850',
        help='name of the test file VHR:test_850 or HR:test_HR')

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    num_classes = 1
    input_channels = 4

    if args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'UNet':
        model = UNet(num_classes=num_classes, input_channels=input_channels)
    elif args.model == 'AlbuNet34':
        model = AlbuNet34(num_classes=num_classes,
                          num_input_channels=input_channels,
                          pretrained=False)
    elif args.model == 'SegNet':
        model = SegNet(num_classes=num_classes,
                       num_input_channels=input_channels,
                       pretrained=False)
    else:
        model = UNet11(num_classes=num_classes, input_channels=input_channels)

    if torch.cuda.is_available():
        if args.device_ids:  #
            device_ids = list(map(int, args.device_ids.split(',')))
        else:
            device_ids = None
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    cudnn.benchmark = True

    ####################Change the files_names ######################################
    out_path = Path(('logs_{}/mapping/').format(args.dataset_file))
    name_file = '_' + str(int(
        args.percent * 100)) + '_percent_' + args.dataset_file
    data_all = 'data'  ##file with all the data

    data_path = Path(args.dataset_path)
    print("data_path:", data_path)
    #################################################################################
    # Nested cross validation K-fold train test
    #train_val_file_names, test_file_names = get_split_out(data_path,data_all,args.fold_out)
    #################################################################################
    # We consider the same test set in all cases
    train_val_file_names = np.array(
        sorted(
            glob.glob(
                str(data_path / args.train_val_file / 'images') + "/*.npy")))
    test_file_names = np.array(
        sorted(
            glob.glob(str(data_path / args.test_file / 'images') + "/*.npy")))

    if args.percent != 1:
        extra, train_val_file_names = percent_split(train_val_file_names,
                                                    args.percent)

    #################################################################################

    train_file_names, val_file_names = get_split_in(train_val_file_names,
                                                    args.fold_in)

    np.save(
        str(
            os.path.join(
                out_path, "train_files{}_{}_fold{}_{}.npy".format(
                    name_file, args.model, args.fold_out, args.fold_in))),
        train_file_names)
    np.save(
        str(
            os.path.join(
                out_path,
                "val_files{}_{}_fold{}_{}.npy".format(name_file, args.model,
                                                      args.fold_out,
                                                      args.fold_in))),
        val_file_names)

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    mode='train',
                    batch_size=4,
                    limit=None):
        return DataLoader(dataset=WaterDataset(file_names,
                                               transform=transform,
                                               mode=mode,
                                               limit=limit),
                          shuffle=shuffle,
                          batch_size=batch_size,
                          pin_memory=torch.cuda.is_available())

    max_values, mean_values, std_values = meanstd(train_file_names,
                                                  val_file_names,
                                                  test_file_names,
                                                  str(data_path),
                                                  input_channels)  #_60
    print(max_values, mean_values, std_values)
    if (args.dataset_file == 'VHR'):
        train_transform = DualCompose([
            CenterCrop(512),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize(mean=mean_values, std=std_values))
        ])

        val_transform = DualCompose([
            CenterCrop(512),
            ImageOnly(Normalize(mean=mean_values, std=std_values))
        ])
        max_values = 3521
        train_loader = make_loader(train_file_names,
                                   shuffle=True,
                                   transform=train_transform,
                                   mode='train',
                                   batch_size=args.batch_size)  #4 batch_size
        valid_loader = make_loader(val_file_names,
                                   transform=val_transform,
                                   batch_size=args.batch_size,
                                   mode="train")

    if (args.dataset_file == 'HR'):
        train_transform = DualCompose([
            CenterCrop(64),
            HorizontalFlip(),
            VerticalFlip(),
            Rotate(),
            ImageOnly(Normalize2(mean=mean_values, std=std_values))
        ])

        val_transform = DualCompose([
            CenterCrop(64),
            ImageOnly(Normalize2(mean=mean_values, std=std_values))
        ])
        train_loader = make_loader(train_file_names,
                                   shuffle=True,
                                   transform=train_transform,
                                   mode='train',
                                   batch_size=args.batch_size)  #8 batch_size
        valid_loader = make_loader(val_file_names,
                                   transform=val_transform,
                                   mode="train",
                                   batch_size=args.batch_size // 2)


#albunet 34 with only 3 batch_size

    dataloaders = {'train': train_loader, 'val': valid_loader}

    dataloaders_sizes = {x: len(dataloaders[x]) for x in dataloaders.keys()}

    root.joinpath(('params_{}.json').format(args.dataset_file)).write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    optimizer_ft = optim.Adam(model.parameters(), lr=args.lr)  #
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=20,
                                           gamma=0.1)

    utilsTrain.train_model(dataset_file=args.dataset_file,
                           name_file=name_file,
                           model=model,
                           optimizer=optimizer_ft,
                           scheduler=exp_lr_scheduler,
                           dataloaders=dataloaders,
                           fold_out=args.fold_out,
                           fold_in=args.fold_in,
                           name_model=args.model,
                           num_epochs=args.n_epochs)

    torch.save(
        model.module.state_dict(),
        (str(out_path) + '/model{}_{}_foldout{}_foldin{}_{}epochs.pth').format(
            name_file, args.model, args.fold_out, args.fold_in, args.n_epochs))

    print(args.model)

    find_metrics(train_file_names=train_file_names,
                 val_file_names=val_file_names,
                 test_file_names=test_file_names,
                 max_values=max_values,
                 mean_values=mean_values,
                 std_values=std_values,
                 model=model,
                 fold_out=args.fold_out,
                 fold_in=args.fold_in,
                 name_model=args.model,
                 epochs=args.n_epochs,
                 out_file=args.dataset_file,
                 dataset_file=args.dataset_file,
                 name_file=name_file)
Code Example #16
def main():
    parser = argparse.ArgumentParser()
    arg = parser.add_argument
    arg('--jaccard-weight', default=0.5, type=float)
    arg('--device-ids',
        type=str,
        default='0',
        help='For example 0,1 to run on two GPUs')
    arg('--fold', type=int, help='fold', default=0)
    arg('--root', default='runs/debug', help='checkpoint root')
    arg('--batch-size', type=int, default=1)
    arg('--n-epochs', type=int, default=100)
    arg('--lr', type=float, default=0.0001)
    arg('--workers', type=int, default=12)
    arg('--type',
        type=str,
        default='binary',
        choices=['binary', 'parts', 'instruments'])
    arg('--model',
        type=str,
        default='UNet',
        choices=['UNet', 'UNet11', 'LinkNet34', 'AlbuNet'])

    args = parser.parse_args()

    root = Path(args.root)
    root.mkdir(exist_ok=True, parents=True)

    if args.type == 'parts':
        num_classes = 4
    elif args.type == 'instruments':
        num_classes = 8
    else:
        num_classes = 1

    if args.model == 'UNet':
        model = UNet(num_classes=num_classes)
    elif args.model == 'UNet11':
        model = UNet11(num_classes=num_classes, pretrained=True)
    elif args.model == 'UNet16':
        model = UNet16(num_classes=num_classes, pretrained=True)
    elif args.model == 'LinkNet34':
        model = LinkNet34(num_classes=num_classes, pretrained=True)
    elif args.model == 'AlbuNet':
        model = AlbuNet(num_classes=num_classes, pretrained=True)
    else:
        model = UNet(num_classes=num_classes, input_channels=3)

    device_ids = None
    if torch.cuda.is_available():
        if args.device_ids:
            device_ids = list(map(int, args.device_ids.split(',')))
        model = nn.DataParallel(model, device_ids=device_ids).cuda()

    if args.type == 'binary':
        loss = LossBinary(jaccard_weight=args.jaccard_weight)
    else:
        loss = LossMulti(num_classes=num_classes,
                         jaccard_weight=args.jaccard_weight)

    cudnn.benchmark = True

    def make_loader(file_names,
                    shuffle=False,
                    transform=None,
                    problem_type='binary',
                    batch_size=1):
        return DataLoader(dataset=CustomDataset(file_names,
                                                transform=transform),
                          shuffle=shuffle,
                          num_workers=args.workers,
                          batch_size=batch_size,
                          pin_memory=torch.cuda.is_available())

    train_file_names, val_file_names = get_split()

    print('num train = {}, num_val = {}'.format(len(train_file_names),
                                                len(val_file_names)))

    def train_transform(p=1):
        return Compose(
            [
                #            Rescale(SIZE),
                RandomCrop(SIZE),
                RandomBrightness(0.2),
                OneOf([
                    IAAAdditiveGaussianNoise(),
                    GaussNoise(),
                ], p=0.15),
                #            OneOf([
                #                OpticalDistortion(p=0.3),
                #                GridDistortion(p=.1),
                #                IAAPiecewiseAffine(p=0.3),
                #            ], p=0.1),
                #            OneOf([
                #                IAASharpen(),
                #                IAAEmboss(),
                #                RandomContrast(),
                #                RandomBrightness(),
                #            ], p=0.15),
                HueSaturationValue(p=0.15),
                HorizontalFlip(p=0.5),
                Normalize(p=1),
            ],
            p=p)

    def val_transform(p=1):
        return Compose(
            [
                #            Rescale(256),
                RandomCrop(SIZE),
                Normalize(p=1)
            ],
            p=p)

    train_loader = make_loader(train_file_names,
                               shuffle=True,
                               transform=train_transform(p=1),
                               problem_type=args.type,
                               batch_size=args.batch_size)
    valid_loader = make_loader(val_file_names,
                               transform=val_transform(p=1),
                               problem_type=args.type,
                               batch_size=len(device_ids) if device_ids else 1)

    root.joinpath('params.json').write_text(
        json.dumps(vars(args), indent=True, sort_keys=True))

    if args.type == 'binary':
        valid = validation_binary
    else:
        valid = validation_multi

    utils.train(init_optimizer=lambda lr: Adam(model.parameters(), lr=lr),
                args=args,
                model=model,
                criterion=loss,
                train_loader=train_loader,
                valid_loader=valid_loader,
                validation=valid,
                fold=args.fold,
                num_classes=num_classes)
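LossBinary and LossMulti are imported from the project and not shown in this excerpt. For reference, here is a hedged sketch of one common formulation of a binary BCE-plus-soft-Jaccard loss driven by a jaccard_weight parameter; the class name LossBinarySketch is hypothetical, and the project's actual LossBinary may differ in detail.

import torch
from torch import nn


class LossBinarySketch:
    # Sketch of a BCE + soft-Jaccard loss for binary segmentation logits.
    def __init__(self, jaccard_weight=0.0):
        self.bce = nn.BCEWithLogitsLoss()
        self.jaccard_weight = jaccard_weight

    def __call__(self, outputs, targets):
        loss = (1 - self.jaccard_weight) * self.bce(outputs, targets)
        if self.jaccard_weight:
            eps = 1e-15
            probs = torch.sigmoid(outputs)
            target = (targets == 1).float()
            intersection = (probs * target).sum()
            union = probs.sum() + target.sum() - intersection
            # Subtract the log of the soft IoU: higher overlap lowers the loss.
            loss -= self.jaccard_weight * torch.log((intersection + eps) / (union + eps))
        return loss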
Code example #17
0
File: train.py Project: tdyczek/unosat
def train(
    epochs: int,
    models_dir: Path,
    x_cities: List[CityData],
    y_city: List[CityData],
    mask_dir: Path,
):
    model = UNet11().cuda()
    optimizer = Adam(model.parameters(), lr=3e-4)
    scheduler = ReduceLROnPlateau(optimizer, patience=4, factor=0.25)
    min_loss = float("inf")
    criterion = nn.BCEWithLogitsLoss()
    train_data = DataLoader(TrainDataset(x_cities, mask_dir),
                            batch_size=4,
                            num_workers=4,
                            shuffle=True)
    test_data = DataLoader(TestDataset(y_city, mask_dir),
                           batch_size=6,
                           num_workers=4)

    for epoch in range(epochs):
        print(f'Epoch {epoch}, lr {optimizer.param_groups[0]["lr"]}')
        print("    Training")

        losses = []
        ious = []
        jaccs = []

        batch_iterator = enumerate(train_data)

        model = model.train().cuda()
        for i, (x, y) in tqdm(batch_iterator):
            optimizer.zero_grad()
            x = x.cuda()
            y = y.cuda()

            y_real = y.view(-1).float()
            y_pred = model(x)
            y_pred_probs = torch.sigmoid(y_pred).view(-1)
            loss = (0.75 * criterion(y_pred.view(-1), y_real) +
                    0.25 * dice_loss(y_pred_probs, y_real))

            iou_ = iou(y_pred_probs.float(), y_real.byte())
            jacc_ = jaccard(y_pred_probs.float(), y_real)
            ious.append(iou_.item())
            losses.append(loss.item())
            jaccs.append(jacc_.item())

            loss.backward()
            optimizer.step()

        print(
            f"Loss: {np.mean(losses)}, IOU: {np.mean(ious)}, jacc: {np.mean(jaccs)}"
        )

        model = model.eval().cuda()
        losses = []
        ious = []
        jaccs = []

        with torch.no_grad():
            batch_iterator = enumerate(test_data)
            for i, (x, y) in tqdm(batch_iterator):
                x = x.cuda()
                y = y.cuda()
                y_real = y.view(-1).float()
                y_pred = model(x)
                y_pred_probs = torch.sigmoid(y_pred).view(-1)
                loss = (0.75 * criterion(y_pred.view(-1), y_real) +
                        0.25 * dice_loss(y_pred_probs, y_real))

                iou_ = iou(y_pred_probs.float(), y_real.byte())
                jacc_ = jaccard(y_pred_probs.float(), y_real)
                ious.append(iou_.item())
                losses.append(loss.item())
                jaccs.append(jacc_.item())
            test_loss = np.mean(losses)
            print(
                f"Loss: {np.mean(losses)}, IOU: {np.mean(ious)}, jacc: {np.mean(jaccs)}"
            )

        scheduler.step(test_loss)
        if test_loss < min_loss:
            min_loss = test_loss
            save_model(model, epoch, models_dir / y_city[0].name)
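The helpers dice_loss, iou and jaccard used above are imported elsewhere in the project and not shown here. As a reference, a minimal sketch of a soft Dice loss consistent with the way dice_loss is called (sigmoid probabilities and binary targets, both flattened); the project's implementation may use different smoothing or reduction.

import torch


def dice_loss(probs: torch.Tensor, targets: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    # Soft Dice loss over flattened probabilities in [0, 1] and binary targets.
    probs = probs.view(-1)
    targets = targets.view(-1).float()
    intersection = (probs * targets).sum()
    dice = (2.0 * intersection + eps) / (probs.sum() + targets.sum() + eps)
    return 1.0 - dice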