def main():
    #torch.manual_seed(42)

    # ------------
    # args
    # ------------
    parser = ArgumentParser()

    # Learning parameters
    parser.add_argument('--auto_lr', type=U.str2bool, default=False,help="Auto lr finder")
    parser.add_argument('--learning_rate', type=float, default=10e-4)
    parser.add_argument('--scheduler', type=U.str2bool, default=False)
    parser.add_argument('--wd', type=float, default=2e-4)
    parser.add_argument('--moment', type=float, default=0.9)
    parser.add_argument('--batch_size', default=5, type=int)
    parser.add_argument('--n_epochs', default=10, type=int)
    parser.add_argument('--iter_every', default=1, type=int,help="Accumulate the computation graph for iter_every steps")
    parser.add_argument('--benchmark', default=False, type=U.str2bool, help="enable or disable backends.cudnn")
    
    # Model and eval
    parser.add_argument('--model', default='FCN', type=str,help="FCN or DLV3 model")
    parser.add_argument('--pretrained', default=False, type=U.str2bool,help="Use pretrained pytorch model")
    parser.add_argument('--eval_angle', default=True, type=U.str2bool,help=\
        "If true, evaluate the model on inputs rotated by different angles")
    
    
    # Data augmentation
    parser.add_argument('--rotate', default=False, type=U.str2bool,help="Use random rotation as data augmentation")
    parser.add_argument('--pi_rotate', default=True, type=U.str2bool,help="Use only pi/2 rotation angle")
    parser.add_argument('--p_rotate', default=0.25, type=float,help="Probability of rotating the image during the training")
    parser.add_argument('--scale', default=True, type=U.str2bool,help="Use scale as data augmentation")
    parser.add_argument('--landcover', default=False, type=U.str2bool,\
         help="Use Landcover dataset instead of VOC and COCO")
    parser.add_argument('--size_img', default=520, type=int,help="Size of input images")
    parser.add_argument('--size_crop', default=480, type=int,help="Size of crop image during training")
    parser.add_argument('--angle_max', default=360, type=int,help="Angle max for data augmentation")
    
    # Dataloader and gpu
    parser.add_argument('--nw', default=0, type=int,help="Num workers for the data loader")
    parser.add_argument('--pm', default=True, type=U.str2bool,help="Pin memory for the dataloader")
    parser.add_argument('--gpu', default=0, type=int,help="Which GPU to select for training")
    
    # Datasets
    parser.add_argument('--split', default=False, type=U.str2bool, help="Split the dataset")
    parser.add_argument('--split_ratio', default=0.3, type=float, help="Fraction of the data used for training")
    parser.add_argument('--extra_coco', default=False, type=U.str2bool,\
         help="Use the COCO dataset as extra annotation for fully supervised training")
    parser.add_argument('--dataroot_voc', default='/data/voc2012', type=str)
    parser.add_argument('--dataroot_sbd', default='/data/sbd', type=str)
    parser.add_argument('--dataroot_coco', default='/share/DEEPLEARNING/datasets/coco', type=str)
    parser.add_argument('--dataroot_landcover', default='/share/DEEPLEARNING/datasets/landcover', type=str)
    
    # Save parameters
    parser.add_argument('--model_name', type=str,help="what name to use for saving")
    parser.add_argument('--save_dir', default='/data/save_model', type=str)
    parser.add_argument('--save_all_ep', default=False, type=U.str2bool,help=\
        "If true, save the model every epoch in save_dir")
    parser.add_argument('--save_best', default=False, type=U.str2bool,help="If true, only save the best epoch model")
    args = parser.parse_args()
    
    # ------------
    # device
    # ------------
    device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu")
    print("device used:",device)
    
    # ------------
    # data
    # ------------
    if args.size_img < args.size_crop:
        raise Exception('Cannot have size of input images less than size of crop')
    size_img = (args.size_img,args.size_img)
    size_crop = (args.size_crop,args.size_crop)
    if not args.landcover:
        train_dataset_VOC = mdset.VOCSegmentation(args.dataroot_voc,year='2012', image_set='train', \
            download=True,rotate=args.rotate,size_img=size_img,size_crop=size_crop)
        test_dataset = mdset.VOCSegmentation(args.dataroot_voc,year='2012', image_set='val', download=True)
        train_dataset_SBD = mdset.SBDataset(args.dataroot_sbd, image_set='train_noval',mode='segmentation',\
            rotate=args.rotate,size_img=size_img,size_crop=size_crop)
        #COCO dataset 
        if args.extra_coco:
            extra_COCO = cu.get_coco(args.dataroot_coco,'train',rotate=args.rotate,size_img=size_img,size_crop=size_crop)
            # Concatenate datasets
            train_dataset = tud.ConcatDataset([train_dataset_VOC,train_dataset_SBD,extra_COCO])
        else:
            train_dataset = tud.ConcatDataset([train_dataset_VOC,train_dataset_SBD])
        num_classes = 21
    else:
        print('Loading Landscape Dataset')
        train_dataset = mdset.LandscapeDataset(args.dataroot_landcover,image_set="trainval",\
            rotate=args.rotate,pi_rotate=args.pi_rotate,p_rotate=args.p_rotate,size_img=size_img,size_crop=size_crop,angle_max=args.angle_max)
        test_dataset = mdset.LandscapeDataset(args.dataroot_landcover,image_set="test")
        print('Successfully loaded Landscape Dataset')
        num_classes = 4
    
    if args.split:
        train_dataset = U.split_dataset(train_dataset,args.split_ratio)
    # Print len datasets
    print("There is",len(train_dataset),"images for training and",len(test_dataset),"for validation")
    dataloader_train = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,num_workers=args.nw,\
        pin_memory=args.pm,shuffle=True,drop_last=True)#,collate_fn=U.my_collate)
    dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=args.nw,pin_memory=args.pm,\
        batch_size=args.batch_size)

    
    # ------------
    # model
    # ------------
    
    if args.model.upper()=='FCN':
        model = models.segmentation.fcn_resnet101(pretrained=args.pretrained,num_classes=num_classes)
    elif args.model.upper()=='DLV3':
        model = models.segmentation.deeplabv3_resnet101(pretrained=args.pretrained,num_classes=num_classes)
    else:
        raise Exception('model must be "FCN" or "DLV3"')
    model.to(device)  # Move the model to the selected device before building the optimizer

    
    # ------------
    # save
    # ------------
    save_dir = U.create_save_directory(args.save_dir)
    print('model will be saved in',save_dir)
    U.save_hparams(args,save_dir)

    # ------------
    # training
    # ------------
    # The auto lr finder is handled inside train_fully_supervised via the auto_lr flag
    print(args)
    
    criterion = nn.CrossEntropyLoss(ignore_index=num_classes) # Ignore the border class.
    torch.autograd.set_detect_anomaly(True)
    optimizer = torch.optim.SGD(model.parameters(),lr=args.learning_rate,momentum=args.moment,weight_decay=args.wd)
    
    ev.train_fully_supervised(model=model,n_epochs=args.n_epochs,train_loader=dataloader_train,val_loader=dataloader_val,\
        criterion=criterion,optimizer=optimizer,save_folder=save_dir,scheduler=args.scheduler,auto_lr=args.auto_lr,\
            model_name=args.model_name,benchmark=args.benchmark, save_best=args.save_best,save_all_ep=args.save_all_ep,\
                device=device,num_classes=num_classes)
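
# U.str2bool and U.split_dataset come from the project's utils module and are not
# shown in this snippet. The two sketches below are only guesses at what such helpers
# might look like, inferred from how they are called above; the names ending in
# _sketch are illustrative and not part of the original code.
from argparse import ArgumentTypeError

def str2bool_sketch(v):
    # argparse `type=` converter turning strings like "true"/"false" into booleans
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    if v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    raise ArgumentTypeError('Boolean value expected.')

def split_dataset_sketch(dataset, ratio, seed=42):
    # Presumably keeps a random subset of roughly ratio * len(dataset) samples
    # (e.g. split_ratio=0.3 keeps ~30% of the training images).
    import torch
    indices = torch.randperm(len(dataset), generator=torch.Generator().manual_seed(seed))
    n_keep = int(ratio * len(dataset))
    return torch.utils.data.Subset(dataset, indices[:n_keep].tolist())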
Example 3
def main():
    #torch.manual_seed(42)

    # ------------
    # args
    # ------------
    parser = ArgumentParser()

    # Model and eval
    parser.add_argument('--model_name',
                        default='rot_equiv_lc.pt',
                        type=str,
                        help="Model name")
    parser.add_argument(
        '--model_dir',
        default='/share/homes/karmimy/equiv/save_model/rot_equiv_lc',
        type=str,
        help="Directory containing the saved models")
    parser.add_argument('--expe', default='17', type=str, help="Experiment number (sub-folder of model_dir)")
    args = parser.parse_args()

    # DATASETS
    dataroot_landcover = '/share/DEEPLEARNING/datasets/landcover'

    model_dir = args.model_dir  # Saved model dir
    expe = args.expe
    model_name = args.model_name
    folder_model = join(model_dir, expe)

    nw = 4
    pm = True
    # GPU
    gpu = 0
    # EVAL PARAMETERS
    bs = 1

    # DEVICE
    # Decide which device we want to run on
    device = torch.device("cuda:" +
                          str(gpu) if torch.cuda.is_available() else "cpu")
    print("device :", device)

    model = torch.load(join(folder_model, model_name), map_location=device)

    test_dataset = mdset.LandscapeDataset(dataroot_landcover, image_set="test")

    l_angles = [210, 240, 270, 300, 330, 0, 30, 60, 90, 120, 150]
    #l_angles = [330,340,350,0,10,20,30]
    l_iou = []
    l_iou_bg = []
    l_iou_c1 = []
    l_iou_c2 = []
    l_iou_c3 = []
    for angle in l_angles:
        test_dataset = mdset.LandscapeDataset(dataroot_landcover,
                                              image_set="test",
                                              fixing_rotate=True,
                                              angle_fix=angle)
        dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=nw,pin_memory=pm,\
            batch_size=bs)
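        # Rotations by multiples of 90 degrees keep the pixel grid exact (no interpolation),
        # which is presumably why they go through ev.eval_model while the other angles use
        # the torchmetrics-based ev.eval_model_tmetrics.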
        if angle % 90 == 0:
            state = ev.eval_model(model,
                                  dataloader_val,
                                  device=device,
                                  num_classes=4)
            m_iou, iou = state.metrics['mean IoU'], state.metrics['IoU']
        else:
            m_iou, iou = ev.eval_model_tmetrics(model,
                                                dataloader_val,
                                                device=device,
                                                num_classes=4)

        try:
            m_iou = m_iou.item()
        except AttributeError:
            # m_iou is already a plain Python number (no .item() method)
            print(m_iou, type(m_iou))
            m_iou = float(m_iou)
        l_iou.append(round(m_iou, 3))
        print('EVAL FOR ANGLE', angle, ': IoU', m_iou)
        print('IoU All classes', iou)
        l_iou_bg.append(float(iou[0]))
        l_iou_c1.append(float(iou[1]))
        l_iou_c2.append(float(iou[2]))
        l_iou_c3.append(float(iou[3]))
    # Wrap each curve around to its first value so the plot closes over a full turn
    l_iou.append(l_iou[0])
    l_iou_bg.append(l_iou_bg[0])
    l_iou_c1.append(l_iou_c1[0])
    l_iou_c2.append(l_iou_c2[0])
    l_iou_c3.append(l_iou_c3[0])

    print('L_IOU', l_iou)
    print('L_IOU_BG', l_iou_bg)
    print('L_IOU_C1', l_iou_c1)
    print('L_IOU_C2', l_iou_c2)
    print('L_IOU_C3', l_iou_c3)
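
# The wrap-around appends above suggest the per-angle IoU curves are meant to be
# plotted over a full turn (e.g. on a polar axis). A minimal, hypothetical plotting
# sketch, assuming matplotlib is available; the function name and output file are
# illustrative and not part of the original script.
import numpy as np
import matplotlib.pyplot as plt

def plot_iou_vs_angle_sketch(l_angles, l_iou, out_file='iou_vs_angle.png'):
    # Repeat the first angle so the curve closes, matching the wrapped IoU lists.
    angles_rad = np.deg2rad(list(l_angles) + [l_angles[0]])
    fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
    ax.plot(angles_rad, l_iou, marker='o')
    ax.set_title('mean IoU vs. input rotation angle')
    fig.savefig(out_file)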
Example 4
def main():
    #torch.manual_seed(42)

    # ------------
    # args
    # ------------
    parser = ArgumentParser()
    

    parser.add_argument('--gpu', default=0, type=int,help="Device")
    args = parser.parse_args()



    # ------------
    # device
    # ------------
    device = torch.device("cuda:"+str(args.gpu) if torch.cuda.is_available() else "cpu")
    print("device used:",device)
    # ------------
    # model
    # ------------

    N_CLASSES = 4

    # Save_dir 

    save_dir = '/share/homes/karmimy/equiv/save_model/landcover_visu'
    # ------------
    # dataset and dataloader
    # ------------
    dataroot_landcover = '/share/DEEPLEARNING/datasets/landcover'
    bs = 1
    num_classes = 4
    pm = True
    nw = 4
    print('Loading Landscape Dataset')
    test_dataset = mdset.LandscapeDataset(dataroot_landcover,image_set="test")
    test_dataset_no_norm =  mdset.LandscapeDataset(dataroot_landcover,image_set="test",normalize=False)
    print('Successfully loaded Landscape Dataset')

    dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=nw,pin_memory=pm,\
    batch_size=bs)
    list_iter = np.arange(len(test_dataset))
    np.random.shuffle(list_iter)

    # count model 
    cpt = 0
    # First model
    model = torch.load('/share/homes/karmimy/equiv/save_model/fully_supervised_lc/31/fcn_fully_sup_lc.pt',map_location=device)
    infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
    print('Visu of model',cpt,'Ended')
    cpt+=1

    model = torch.load('/share/homes/karmimy/equiv/save_model/fully_supervised_lc/30/fcn_fully_sup_lc.pt',map_location=device)
    infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
    print('Visu of model',cpt,'Ended')
    cpt+=1
    
    model = torch.load('/share/homes/karmimy/equiv/save_model/rot_equiv_lc/17/rot_equiv_lc.pt',map_location=device)
    infere_and_save(model,save_dir,list_iter,test_dataset_no_norm,test_dataset,device,cpt)
    print('Visu of model',cpt,'Ended')
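
# infere_and_save is not defined in this snippet. The sketch below is only a guess at
# what a helper with the signature used above might do (forward a few test images and
# save each predicted mask); every detail here is an assumption, not the repository's
# actual implementation.
import os
import torch
import numpy as np
from PIL import Image

def infere_and_save_sketch(model, save_dir, list_iter, dataset_no_norm, dataset, device, cpt, n_images=10):
    model.eval()
    with torch.no_grad():
        for i in list_iter[:n_images]:
            img, _ = dataset[int(i)]  # normalized image fed to the network
            out = model(img.unsqueeze(0).to(device))['out']
            pred = out.argmax(dim=1).squeeze(0).cpu().numpy().astype(np.uint8)
            Image.fromarray(pred).save(os.path.join(save_dir, 'model%d_img%d_pred.png' % (cpt, int(i))))
            # dataset_no_norm would give the un-normalized image for a side-by-side figure.
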
def main():
    #torch.manual_seed(42)
    # ------------
    # args
    # ------------
    parser = ArgumentParser()

    # Learning parameters
    parser.add_argument('--auto_lr',
                        type=U.str2bool,
                        default=False,
                        help="Auto lr finder")
    parser.add_argument('--learning_rate', type=float, default=10e-4)
    parser.add_argument('--Loss', type=str, default='KL')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.5,
                        help="gamma balance the two losses")
    parser.add_argument('--scheduler', type=U.str2bool, default=False)
    parser.add_argument('--wd', type=float, default=2e-4)
    parser.add_argument('--moment', type=float, default=0.9)
    parser.add_argument('--batch_size', default=5, type=int)
    parser.add_argument('--iter_every',
                        default=1,
                        type=int,
                        help="Accumulate the computation graph for iter_every steps")
    parser.add_argument('--n_epochs', default=10, type=int)
    parser.add_argument('--benchmark',
                        default=True,
                        type=U.str2bool,
                        help="enable or disable backends.cudnn")

    # Model and eval
    parser.add_argument('--model',
                        default='FCN',
                        type=str,
                        help="FCN or DLV3 model")
    parser.add_argument('--pretrained',
                        default=False,
                        type=U.str2bool,
                        help="Use pretrained pytorch model")
    parser.add_argument('--eval_angle', default=True, type=U.str2bool,\
        help="If true, evaluate the model on inputs rotated by different angles")
    parser.add_argument('--eval_every',
                        default=30,
                        type=int,
                        help="Eval all input rotation angle every n step")

    # Data augmentation
    parser.add_argument('--rotate',
                        default=False,
                        type=U.str2bool,
                        help="Use random rotation as data augmentation")
    parser.add_argument('--pi_rotate',
                        default=True,
                        type=U.str2bool,
                        help="Use only pi/2 rotation angle")
    parser.add_argument('--angle_max',
                        default=30,
                        type=int,
                        help="Max angle rotation of input image")
    parser.add_argument('--size_img',
                        default=520,
                        type=int,
                        help="Size of input images")
    parser.add_argument('--size_crop',
                        default=480,
                        type=int,
                        help="Size of crop image during training")

    # Dataloader and gpu
    parser.add_argument('--nw',
                        default=0,
                        type=int,
                        help="Num workers for the data loader")
    parser.add_argument('--pm',
                        default=True,
                        type=U.str2bool,
                        help="Pin memory for the dataloader")
    parser.add_argument('--gpu',
                        default=0,
                        type=int,
                        help="Which GPU to select for training")
    parser.add_argument(
        '--rot_cpu',
        default=False,
        type=U.str2bool,
        help="Apply rotation on the cpu (Help to use less gpu memory)")
    parser.add_argument('--split',
                        default=True,
                        type=U.str2bool,
                        help="Split the dataset")
    parser.add_argument('--split_ratio',
                        default=0.3,
                        type=float,
                        help="Fraction of the data used for training")
    parser.add_argument(
        '--multi_task',
        default=False,
        type=U.str2bool,
        help="Multi task training (same data for equiv and sup)")

    # Dataset utils
    parser.add_argument('--extra_coco', default=False, type=U.str2bool,\
         help="Use coco dataset as extra annotation for fully supervised training")
    parser.add_argument('--landcover', default=False, type=U.str2bool,\
         help="Use Landcover dataset instead of VOC and COCO")

    # Load and save path
    parser.add_argument('--dataroot_voc', default='~/data/voc2012', type=str)
    parser.add_argument('--dataroot_sbd', default='~/data/sbd', type=str)
    parser.add_argument('--dataroot_coco',
                        default='/share/DEEPLEARNING/datasets/coco',
                        type=str)
    parser.add_argument('--dataroot_landcover',
                        default='/share/DEEPLEARNING/datasets/landcover',
                        type=str)
    parser.add_argument('--model_name',
                        type=str,
                        help="what name to use for saving")
    parser.add_argument('--save_dir', default='/data/save_model', type=str)

    # Save mode
    parser.add_argument('--save_all_ep', default=False, type=U.str2bool,help=\
        "If true, save the model every epoch in save_dir")
    parser.add_argument('--save_best',
                        default=False,
                        type=U.str2bool,
                        help="If true, only save the best epoch model")
    parser.add_argument('--load_last_model',
                        default=False,
                        type=U.str2bool,
                        help="If true, load the last model saved with these parameters")
    args = parser.parse_args()
    # ------------
    # device
    # ------------
    device = torch.device(
        "cuda:" + str(args.gpu) if torch.cuda.is_available() else "cpu")
    print("device used:", device)
    # ------------
    # model
    # ------------

    # ------------
    # data
    # ------------
    if args.size_img < args.size_crop:
        raise Exception(
            'Cannot have size of input images less than size of crop')
    size_img = (args.size_img, args.size_img)
    size_crop = (args.size_crop, args.size_crop)
    if not args.landcover:
        print('Loading VOC and SBD Dataset')
        train_dataset_VOC = mdset.VOCSegmentation(args.dataroot_voc,year='2012', image_set='train', \
            download=True,rotate=args.rotate,size_img=size_img,size_crop=size_crop)
        test_dataset = mdset.VOCSegmentation(args.dataroot_voc,
                                             year='2012',
                                             image_set='val',
                                             download=True)
        train_dataset_SBD = mdset.SBDataset(args.dataroot_sbd, image_set='train_noval',mode='segmentation',\
            rotate=args.rotate,size_img=size_img,size_crop=size_crop)
        print('Successfully loaded VOC and SBD Dataset')
        #COCO dataset
        if args.extra_coco:
            print('Loading COCO Dataset')
            extra_COCO = cu.get_coco(args.dataroot_coco,
                                     'train',
                                     rotate=args.rotate,
                                     size_img=size_img,
                                     size_crop=size_crop)
            print('Successfully loaded COCO Dataset')
        # Concatenate datasets
        train_dataset_unsup = tud.ConcatDataset(
            [train_dataset_VOC, train_dataset_SBD])
        num_classes = 21
    else:
        print('Loading Landscape Dataset')
        train_dataset_unsup = mdset.LandscapeDataset(args.dataroot_landcover,image_set="trainval",\
            rotate=args.rotate,pi_rotate=args.pi_rotate,size_img=size_img,size_crop=size_crop)
        test_dataset = mdset.LandscapeDataset(args.dataroot_landcover,
                                              image_set="test")
        num_classes = 4
        print('Successfully loaded Landscape Dataset')

    # Split dataset
    if args.split:
        train_dataset_sup = U.split_dataset(train_dataset_unsup,
                                            args.split_ratio)
    else:
        train_dataset_sup = train_dataset_unsup
    # Multi task ?
    if args.multi_task:
        train_dataset_unsup = train_dataset_sup

    # If extra COCO is used, concatenate all datasets for unsupervised training
    if args.extra_coco and not args.landcover:
        train_dataset_unsup = tud.ConcatDataset(
            [train_dataset_VOC, train_dataset_SBD, extra_COCO])

    # Print len datasets
    print("There is",len(train_dataset_sup),"images for supervised training",len(train_dataset_unsup),\
        "for equivariance loss and",len(test_dataset),"for validation")

    dataloader_train_sup = torch.utils.data.DataLoader(train_dataset_sup, batch_size=args.batch_size,num_workers=args.nw,\
        pin_memory=args.pm,shuffle=True,drop_last=True)
    dataloader_val = torch.utils.data.DataLoader(test_dataset,num_workers=args.nw,pin_memory=args.pm,\
        batch_size=args.batch_size)
    # ---------
    # Load model
    # ---------
    if args.load_last_model:
        model,save_dir = fbm.load_best_model(save_dir=args.save_dir,model_name=args.model_name,split=args.split,\
            split_ratio=args.split_ratio,batch_size =args.batch_size,rotate=args.rotate)
        print("Training will continue from this file.", save_dir)
    else:
        save_dir = U.create_save_directory(
            args.save_dir)  # Create a new save directory
        if args.model.upper() == 'FCN':
            model = models.segmentation.fcn_resnet101(
                pretrained=args.pretrained, num_classes=num_classes)
        elif args.model.upper() == 'DLV3':
            model = models.segmentation.deeplabv3_resnet101(
                pretrained=args.pretrained, num_classes=num_classes)
        else:
            raise Exception('model must be "FCN" or "DLV3"')
        model.to(device)

    # ------------
    # save
    # ------------
    print('model will be saved in', save_dir)
    U.save_hparams(args, save_dir)

    print('PARAMS')
    print(args)
    # ------------
    # training
    # ------------
    # Auto lr finding
    #if args.auto_lr==True:

    criterion_supervised = nn.CrossEntropyLoss(
        ignore_index=num_classes)  # Ignore the border class.
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.moment,
                                weight_decay=args.wd)
    ev.train_rot_equiv(model,args.n_epochs,dataloader_train_sup,train_dataset_unsup,dataloader_val,criterion_supervised,optimizer,\
        scheduler=args.scheduler,Loss=args.Loss,gamma=args.gamma,batch_size=args.batch_size,iter_every=args.iter_every,\
            save_folder=save_dir,model_name=args.model_name,benchmark=args.benchmark,angle_max=args.angle_max,size_img=args.size_img,\
        eval_every=args.eval_every,save_all_ep=args.save_all_ep,dataroot_voc=args.dataroot_voc,save_best=args.save_best\
           ,rot_cpu=args.rot_cpu ,device=device,num_classes=num_classes)
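
    # train_rot_equiv is not shown in this snippet. Below is a minimal, hypothetical
    # sketch of the rotation-equivariance consistency term it presumably combines with
    # the supervised cross-entropy (weighted by --gamma, with --Loss selecting e.g. a
    # KL divergence): the prediction for a rotated image should match the rotated
    # prediction of the original image. Name and exact formulation are assumptions.
    import torch.nn.functional as F
    import torchvision.transforms.functional as TF

    def rotation_consistency_loss_sketch(seg_model, img, angle):
        out = seg_model(img)['out']                           # logits for the original image
        out_rot = TF.rotate(out, angle)                       # rotate the prediction by `angle`
        out_of_rot = seg_model(TF.rotate(img, angle))['out']  # prediction of the rotated image
        # KL divergence between the two per-pixel distributions (input must be log-probs)
        return F.kl_div(F.log_softmax(out_of_rot, dim=1),
                        F.softmax(out_rot, dim=1),
                        reduction='batchmean')
    # The total loss would then be something like:
    #   loss = gamma * supervised_ce + (1 - gamma) * rotation_consistency_loss_sketch(...)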

    # Final evaluation
    """