Example #1
# Assumes `from pathlib import Path` and the project's SegmentationNeuralNet.
def load_model(full_url, use_cuda=True, gpu_id=0, verbose=False):
    full_url = Path(full_url)
    full_path = str(full_url)

    # Derive project metadata from the checkpoint path layout;
    # ckpt_path, file_name and exp_type are computed but unused in this variant.
    nameproject = full_url.parent.parent.name
    patchproject = full_url.parent.parent.parent
    ckpt_path = '/'.join(full_url.parts[-2:])
    file_name = full_url.name
    exp_type = full_url.parent.parent.parent.name

    net = SegmentationNeuralNet(patchproject=patchproject,
                                nameproject=nameproject,
                                no_cuda=not use_cuda,
                                parallel=False,
                                seed=2021,
                                print_freq=False,
                                gpu=gpu_id)

    net.load(full_path, verbose)

    if use_cuda:
        net.net.cuda(gpu_id)
    net.net.eval()

    return net
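
A minimal usage sketch for this variant. The checkpoint path below is a hypothetical example of the layout the function assumes (experiments root / project name / models / checkpoint file), since it derives nameproject from full_url.parent.parent:

# Hypothetical path; adjust to your experiments layout.
ckpt = 'out/experiments/unet_baseline/models/model_best.pth.tar'
net = load_model(ckpt, use_cuda=True, gpu_id=0, verbose=True)
# net.net is the underlying torch module, already switched to eval mode.

Example #2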
def load_model(full_url, use_cuda=False, gpu_id=0):
    # full_url is expected to already be a pathlib.Path (see Example #1).
    full_path = str(full_url)

    nameproject = full_url.parent.parent.name
    patchproject = full_url.parent.parent.parent
    ckpt_path = '/'.join(full_url.parts[-2:])
    file_name = full_url.name
    exp_type = full_url.parent.parent.parent.name
    
    net = SegmentationNeuralNet(
        patchproject=patchproject, 
        nameproject=nameproject, 
        no_cuda=not use_cuda, parallel=False, seed=2021, 
        print_freq=False, gpu=gpu_id
    )

    try:
        if net.load(full_path) is not True:
            print("*" * 30)
            print("Not found warning: ", full_path)
            print("*" * 30)
            return False, None, None
    except Exception as e:
        print("LOAD error: ", e)
        print("*" * 30)
        return False, None, None
    
    save_path = fr'extra/outputs/{DATASET_LABEL}/{nameproject}_{file_name}/'
    
    if use_cuda:
        net.net.cuda(gpu_id)
    net.net.eval()
    
    return True, net, save_path
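
A usage sketch for this variant, which reports failure through its return tuple instead of raising. The paths are hypothetical, and DATASET_LABEL must exist at module scope because load_model reads it when building save_path:

import os
from pathlib import Path

DATASET_LABEL = 'isbi2012'  # assumed module-level global

ok, net, save_path = load_model(Path('out/exp/unet/models/chk000100.pth.tar'))
if ok:
    os.makedirs(save_path, exist_ok=True)
else:
    print('skipping missing or unreadable checkpoint')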
Example #3
def main():
    
    # parameters
    parser       = arg_parser()
    args         = parser.parse_args()
    parallel     = args.parallel
    imcrop       = args.image_crop
    imsize       = args.image_size
    num_classes  = args.num_classes
    num_channels = args.num_channels    
    count_train  = args.count_train #10000
    count_test   = args.count_test #5000
    post_method  = args.post_method
    
    folders_contours = 'touchs'
        
    print('Baseline classification {}!!!'.format(datetime.datetime.now()))
    print('\nArgs:')
    for k, v in vars(args).items():
        print('\t* {}: {}'.format(k, v))
    print('')
    
    network = SegmentationNeuralNet(
        patchproject=args.project,
        nameproject=args.name,
        no_cuda=args.no_cuda,
        parallel=parallel,
        seed=args.seed,
        print_freq=args.print_freq,
        gpu=args.gpu
        )

    network.create( 
        arch=args.arch, 
        num_output_channels=num_classes, 
        num_input_channels=num_channels,
        loss=args.loss, 
        lr=args.lr, 
        momentum=args.momentum,
        optimizer=args.opt,
        lrsch=args.scheduler,
        pretrained=args.finetuning,
        size_input=imsize
        )
    
    cudnn.benchmark = True

    # resume model
    if args.resume:
        network.resume( os.path.join(network.pathmodels, args.resume ) )

    # print neural net class
    print('Load model: ')
    print(network)

        
    # datasets
    # training dataset
    train_data = dsxbdata.GenericDataset(
        args.data, 
        'train_single', 
        #folders_contours=folders_contours,
        count=count_train,
        num_channels=num_channels,
        transform=get_transforms_geom_color(),
        use_weight=True
    )

    """
    train_data = dsxbdata.TCellsDataset(
        args.data, 
        "train_single", 
        count=count_train,
        num_channels=num_channels,
        transform=get_simple_transforms(),
        )
    """
    
    train_loader = DataLoader(train_data, batch_size=args.batch_size_train, shuffle=True, 
        num_workers=args.workers, pin_memory=network.cuda, drop_last=True )
    
    # validation dataset
    val_data = dsxbdata.GenericDataset(
        args.data, 
        "validation", 
        #folders_contours=folders_contours,
        count=None,
        num_channels=num_channels,
        transform=get_simple_transforms(),
        use_weight=True
    )
        
    # alternative TCellsDataset validation loader, kept for reference
    """
    val_data = dsxbdata.TCellsDataset(
        args.data,
        "validation",
        count=count_test,
        num_channels=num_channels,
        transform=get_simple_transforms(),
        )
    """
    val_loader = DataLoader(val_data, batch_size=args.batch_size_test, shuffle=False, 
        num_workers=args.workers, pin_memory=network.cuda, drop_last=False)
    print("*"*60, args.batch_size_train, args.batch_size_test, '*'*61)
    

        
    # print neural net class
    print('SEG-Torch: {}'.format(datetime.datetime.now()) )
    print(network)
    # training neural net
    def count_parameters(model):
        return sum(p.numel() for p in model.net.parameters() if p.requires_grad)

    print('N Param: ', count_parameters(network))
    network.fit( train_loader, val_loader, args.epochs, args.snapshot )
                   
    print("Optimization Finished!")
    print("DONE!!!")
Example #4
def main():

    # parameters
    parser = arg_parser()
    args = parser.parse_args()
    imsize = args.image_size
    parallel = args.parallel
    num_classes = 2
    num_channels = 3
    view_freq = 2

    network = SegmentationNeuralNet(
        patchproject=args.project,
        nameproject=args.name,
        no_cuda=args.no_cuda,
        parallel=parallel,
        seed=args.seed,
        print_freq=args.print_freq,
        gpu=args.gpu,
        view_freq=view_freq,
    )

    network.create(arch=args.arch,
                   num_output_channels=num_classes,
                   num_input_channels=num_channels,
                   loss=args.loss,
                   lr=args.lr,
                   momentum=args.momentum,
                   optimizer=args.opt,
                   lrsch=args.scheduler,
                   pretrained=args.finetuning,
                   size_input=imsize)

    # resume (unconditional here, unlike the guarded call in Example #3)
    network.resume(os.path.join(network.pathmodels, args.resume))
    cudnn.benchmark = True

    # datasets
    # training dataset
    train_data = tgsdata.TGSDataset(
        args.data,
        tgsdata.train,
        count=16000,
        num_channels=num_channels,
        transform=transforms.Compose([
            mtrans.ToRandomTransform(mtrans.HFlip(), prob=0.5),
            mtrans.ToRandomTransform(mtrans.VFlip(), prob=0.5),
            mtrans.ToResize((300, 300),
                            resize_mode='squash',
                            padding_mode=cv2.BORDER_REFLECT_101),
            mtrans.RandomCrop((256, 256),
                              limit=10,
                              padding_mode=cv2.BORDER_REFLECT_101),
            mtrans.RandomScale(factor=0.2,
                               padding_mode=cv2.BORDER_REFLECT_101),
            mtrans.RandomGeometricalTransform(
                angle=30,
                translation=0.2,
                warp=0.02,
                padding_mode=cv2.BORDER_REFLECT_101),
            #mtrans.ToResizeUNetFoV(imsize, cv2.BORDER_REFLECT_101),
            mtrans.ToRandomTransform(mtrans.RandomBrightness(factor=0.15),
                                     prob=0.50),
            mtrans.ToRandomTransform(mtrans.RandomContrast(factor=0.15),
                                     prob=0.50),
            mtrans.ToRandomTransform(mtrans.RandomGamma(factor=0.15),
                                     prob=0.50),
            mtrans.ToRandomTransform(mtrans.ToGaussianBlur(), prob=0.15),
            mtrans.ToTensor(),
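            # standard ImageNet channel statistics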
            mtrans.ToMeanNormalization(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            )
            #mtrans.ToNormalization(),
        ]))

    train_loader = DataLoader(train_data,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.workers,
                              pin_memory=network.cuda,
                              drop_last=True)

    # validate dataset
    val_data = tgsdata.TGSDataset(
        args.data,
        tgsdata.test,
        count=4000,
        num_channels=num_channels,
        transform=transforms.Compose([
            mtrans.ToResize((256, 256), resize_mode='squash'),
            #mtrans.RandomCrop( (255,255), limit=50, padding_mode=cv2.BORDER_CONSTANT  ),
            #mtrans.ToResizeUNetFoV(imsize, cv2.BORDER_REFLECT_101),
            mtrans.ToTensor(),
            mtrans.ToMeanNormalization(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            )
            #mtrans.ToNormalization(),
        ]))

    val_loader = DataLoader(val_data,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.workers,
                            pin_memory=network.cuda,
                            drop_last=True)

    # print neural net class
    print('SEG-Torch: {}'.format(datetime.datetime.now()))
    print(network)

    # training neural net
    network.fit(train_loader, val_loader, args.epochs, args.snapshot)

    print("Optimization Finished!")
    print("DONE!!!")
Example #5
def main():

    # parameters
    parser = arg_parser()
    args = parser.parse_args()
    parallel = args.parallel
    num_classes = args.num_classes
    num_channels = args.num_channels
    count_train = args.count_train  #10000
    count_test = args.count_test  #5000
    post_method = args.post_method
    weight = args.weight
    use_ori = int(args.use_ori)
    use_weights = weight != ''
    segs_per_forward = int(args.segs_per_forward)
    load_segs = bool(args.load_segs)
    just_eval = int(args.just_eval)
    use_bagging = int(args.use_bagging)
    bagging_seed = int(args.bagging_seed)
    middle = args.middle_proc

    folders_contours = 'touchs'

    print('Baseline classification {}!!!'.format(datetime.datetime.now()))
    print('\nArgs:')
    for k, v in vars(args).items():
        print('\t* {}: {}'.format(k, v))
    print('')

    num_input_channels = (num_channels * use_ori) + (load_segs *
                                                     segs_per_forward)
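    # Worked example with illustrative values: keeping the RGB input
    # (use_ori=1, num_channels=3) and feeding back 4 segmentation maps
    # (load_segs=True, segs_per_forward=4) gives 3*1 + 1*4 = 7 input
    # channels; the bool load_segs multiplies as 1.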
    writer = SummaryWriter('logs/' + args.name)

    network = SegmentationNeuralNet(
        patchproject=args.project,
        nameproject=args.name,
        no_cuda=args.no_cuda,
        parallel=parallel,
        seed=args.seed,
        print_freq=args.print_freq,
        gpu=args.gpu,
        writer=writer,
    )

    network.create(arch=args.arch,
                   num_output_channels=num_classes,
                   num_input_channels=num_input_channels,
                   loss=args.loss,
                   lr=args.lr,
                   momentum=args.momentum,
                   optimizer=args.opt,
                   lrsch=args.scheduler,
                   pretrained=args.finetuning,
                   cascade_type=args.cascade,
                   segs_per_forward=segs_per_forward,
                   use_ori=use_ori,
                   data_name=args.data,
                   middle_proc=middle)

    cudnn.benchmark = False  # disabled due to augmentation (variable input shapes)

    # resume model
    if args.resume:
        network.resume(os.path.join(network.pathmodels, args.resume))

    if not just_eval:

        # datasets
        # training dataset
        train_data = dsxbdata.ISBIDataset(
            args.data,
            'train',
            folders_labels=f'labels{num_classes}c',
            count=count_train,
            num_classes=num_classes,
            num_channels=num_channels,
            transform=get_transforms_geom_color(),
            use_weight=use_weights,
            weight_name=weight,
            load_segments=load_segs,
            shuffle_segments=True,
            use_ori=use_ori,
            use_bagging=use_bagging,
            bagging_seed=bagging_seed,
            middle_proc=middle)

        train_loader = DataLoader(train_data,
                                  batch_size=args.batch_size_train,
                                  shuffle=True,
                                  num_workers=args.workers,
                                  pin_memory=False,
                                  drop_last=True)

        val_data = dsxbdata.ISBIDataset(args.data,
                                        "val",
                                        folders_labels=f'labels{num_classes}c',
                                        count=count_test,
                                        num_classes=num_classes,
                                        num_channels=num_channels,
                                        transform=get_transforms_geom_color(),
                                        use_weight=use_weights,
                                        weight_name=weight,
                                        load_segments=load_segs,
                                        shuffle_segments=True,
                                        use_ori=use_ori,
                                        middle_proc=middle)

        val_loader = DataLoader(val_data,
                                batch_size=args.batch_size_test,
                                shuffle=False,
                                num_workers=args.workers,
                                pin_memory=False,
                                drop_last=False)
        print("*" * 60, args.batch_size_train, args.batch_size_test, '*' * 61)
        print("*" * 60, len(train_loader), len(val_loader), '*' * 61)

        # print neural net class
        print('SEG-Torch: {}'.format(datetime.datetime.now()))

        #print(network)

        # training neural net
        def count_parameters(model):
            return sum(p.numel() for p in model.net.parameters()
                       if p.requires_grad)

        print('N Param: ', count_parameters(network))

        network.fit(train_loader, val_loader, args.epochs, args.snapshot)

        print("Optimization Finished!")
        print("DONE!!!")

        del val_data
        del train_data

    np.random.seed(0)

    test_data = dsxbdata.ISBIDataset(args.data,
                                     "test",
                                     folders_labels=f'labels{num_classes}c',
                                     count=254,
                                     num_classes=num_classes,
                                     num_channels=num_channels,
                                     transform=get_simple_transforms(),
                                     use_weight=use_weights,
                                     weight_name=weight,
                                     load_segments=load_segs,
                                     shuffle_segments=True,
                                     use_ori=use_ori,
                                     middle_proc=middle)

    test_loader = DataLoader(test_data,
                             batch_size=args.batch_size_test,
                             shuffle=False,
                             num_workers=args.workers,
                             pin_memory=network.cuda,
                             drop_last=False)

    network.evaluate(test_loader, -2, tag='Test')
Example #6
def main():
    
    # parameters
    parser       = arg_parser()
    args         = parser.parse_args()
    parallel     = args.parallel
    imcrop       = args.image_crop
    imsize       = args.image_size
    num_classes  = args.num_classes
    num_channels = args.num_channels    
    count_train  = args.count_train #10000
    count_test   = args.count_test #5000
    post_method  = args.post_method
    weight       = args.weight
    numsegs      = int(args.numsegs)
    pad          = int(args.pad)
    count_segs   = int(args.count_segs)
    load_segs    = bool(args.load_segments)
    use_ori      = int(args.use_ori)
    
    use_weights  = weight != ''
    
    folders_contours = 'touchs'

    num_input_channels = (num_channels * use_ori) + (numsegs * load_segs)
        
    writer = SummaryWriter('logs/eval ' + args.name)
    print('Baseline classification {}!!!'.format(datetime.datetime.now()))
    
    
    network = SegmentationNeuralNet(
        patchproject=args.project,
        nameproject=args.name,
        no_cuda=args.no_cuda,
        parallel=parallel,
        seed=args.seed,
        print_freq=args.print_freq,
        gpu=args.gpu
        )

    network.create( 
        arch=args.arch, 
        num_output_channels=num_classes, 
        num_input_channels=num_input_channels,
        loss=args.loss, 
        lr=args.lr, 
        momentum=args.momentum,
        optimizer=args.opt,
        lrsch=args.scheduler,
        pretrained=args.finetuning,
        size_input=imsize,
        cascade_type=args.cascade,
        writer=writer
        )
    
    
    epoch, value = network.resume(os.path.join(network.pathmodels, args.resume))
    assert epoch != 0, "Model weights not found"
    
    cudnn.benchmark = True

    test_data = dsxbdata.ISBIDataset(
        args.data, 
        "test", 
        folders_labels=f'labels{num_classes}c',
        count=None,
        num_classes=num_classes,
        num_channels=num_channels,
        transform=get_simple_transforms(pad=pad),
        use_weight=use_weights,
        weight_name=weight,
        load_segments=load_segs,
        shuffle_segments=False,
        count_segments=count_segs,
        use_ori=use_ori
    )
        
    test_loader = DataLoader(test_data, batch_size=args.batch_size_test, shuffle=False, 
        num_workers=args.workers, pin_memory=network.cuda, drop_last=False)
    
    print(f"Model Val set: {value:0.3f}")
    network.evaluate( test_loader, epoch)
                   
    print("Eval Finished!")
    print("DONE!!!")
Example #7
        transform=transforms.Compose([
            mtrans.ToResize((256, 256), resize_mode='squash', padding_mode=cv2.BORDER_REFLECT_101),
            #mtrans.ToResizeUNetFoV(imsize, cv2.BORDER_REFLECT_101),
            mtrans.ToTensor(),
            #mtrans.ToNormalization(),
            mtrans.ToMeanNormalization(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
    )

    # load model
    print('>> Load model ...')

    net = SegmentationNeuralNet( 
        patchproject=project, 
        nameproject=projectname, 
        no_cuda=cuda, 
        parallel=parallel, 
        seed=seed, 
        gpu=gpu 
        )

    if net.load(pathnamemodel) is not True:
        assert False, 'failed to load model: {}'.format(pathnamemodel)

    #folder_files = os.path.expanduser( os.path.join(pathnamedataset, 'sample_submission.csv' )  )
    #submission = pd.read_csv( folder_files )
    #results = { i:rlecode  for i,rlecode in zip( submission['id'], submission['rle_mask'] ) }  
    results = list()    
    for idx in tqdm( range( len(dataset) ) ):   
        
        sample = dataset[ idx ]    
        idname = dataset.data.getimagename( idx )
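
The fragment ends before the forward pass. A hypothetical continuation of the loop is sketched below; the sample layout (an 'image' key), the direct call to the underlying net.net module, and the argmax post-processing are all assumptions, not the project's code:

        # hypothetical continuation (assumes `import torch` at the top)
        image = sample['image'].unsqueeze(0)  # add a batch dimension
        with torch.no_grad():
            # move the input to wherever the model lives, then score it
            score = net.net(image.to(next(net.net.parameters()).device))
        pred = score.argmax(dim=1).squeeze(0).cpu().numpy()
        results.append((idname, pred))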
Example #8
opt='adam'
scheduler='fixed'
finetuning=False
nepoch=10
size_input=388
snapshot=5
view_freq=1
num_workers=10
batch_size=10
count=1000

network = SegmentationNeuralNet(
        patchproject=project,
        nameproject=name,
        no_cuda=no_cuda,
        parallel=parallel,
        seed=seed,
        print_freq=print_freq,
        gpu=gpu,
        view_freq=view_freq,
        )

# load model
print('||> load model ...')
start = time.time()
if network.load(path_model) is not True:
    assert False, 'failed to load model: {}'.format(path_model)
t = time.time() - start
print('||> load model time: {}sec'.format(t) )


print('||> load dataset ...')
Example #9
scheduler='fixed'
finetuning=False
nepoch=10
size_input=100
snapshot=5
view_freq=1
num_workers=1
batch_size=3
count=100


network = SegmentationNeuralNet(
        patchproject=project,
        nameproject=name,
        no_cuda=no_cuda,
        parallel=parallel,
        seed=seed,
        print_freq=print_freq,
        gpu=gpu,
        view_freq=view_freq,
        )

print('||> create model ...')
start = time.time()
network.create(
        arch=arch, 
        num_output_channels=num_classes, 
        num_input_channels=num_channels,  
        loss=loss, 
        lr=lr, 
        momentum=momentum,
        optimizer=opt,