Example #1
def train(data_type='woundhealing',
          model_name='unet',
          loss_type='l1smooth',
          cuda_id=0,
          log_dir_root=log_dir_root_dflt,
          batch_size=16,
          optimizer_name='adam',
          lr=1e-5,
          weight_decay=0.0,
          momentum=0.9,
          roi_size=128,
          is_scaled_output=False,
          is_preloaded=False,
          **argkws):
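    """Train a U-Net cell-demixing model on the chosen dataset; the per-dataset
    defaults (input/output channels, data-flow arguments) come from data_types_dflts."""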

    log_dir = log_dir_root / 'cell_demixer' / data_type

    dflts = data_types_dflts[data_type]
    n_ch_in = dflts['n_ch_in']
    n_ch_out = dflts['n_ch_out']
    flow_args = dflts['flow_args']

    train_flow = MergeFlow(**flow_args,
                           roi_size=roi_size,
                           is_preloaded=is_preloaded,
                           samples_per_epoch=20480)

    model = UNet(n_channels=n_ch_in, n_classes=n_ch_out)
    device = get_device(cuda_id)

    criterion = get_loss(loss_type)

    optimizer = get_optimizer(optimizer_name,
                              model,
                              lr=lr,
                              momentum=momentum,
                              weight_decay=weight_decay)

    date_str = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')

    if is_scaled_output:
        data_type += '-scaled'
    save_prefix = f'{data_type}-roi{roi_size}_{model_name}_{loss_type}_{date_str}'
    save_prefix = f'{save_prefix}_{optimizer_name}_lr{lr}_wd{weight_decay}_batch{batch_size}'

    train_demixer(save_prefix,
                  model,
                  device,
                  train_flow,
                  criterion,
                  optimizer,
                  log_dir,
                  batch_size=batch_size,
                  **argkws)
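For reference, a minimal sketch of how this entry point could be launched from the command line, assuming the python-fire package; the repository's actual launcher is not shown in the snippet and may differ.

if __name__ == '__main__':
    import fire  # assumption: python-fire is not imported in the original snippet
    fire.Fire(train)  # expose the keyword arguments of train() as CLI flags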
Example #2
import sys
from pathlib import Path

root_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(root_dir))
sys.path.append(str(root_dir / 'scripts'))

from cell_localization.trainer import get_device
from cell_localization.evaluation.localmaxima import score_coordinates

from load_model import load_model

import tqdm
import matplotlib.pyplot as plt

if __name__ == '__main__':
    cuda_id = 0
    device = get_device(cuda_id)

    # model checkpoint to evaluate; alternative checkpoints are kept commented out below
    bn = 'woundhealing-v2-mix+Fwoundhealing+roi96_unet-simple_l1smooth-G1.5_20190717_115942_adam_lr6.4e-05_wd0.0_batch64'
    #bn = 'woundhealing-v2-mix+Fwoundhealing+roi48_unet-simple_l2-G1.5_20190717_152330_sgd+stepLR-4-0.1_lr0.000256_wd0.0_batch256'
    #bn = 'woundhealing-v2-mix+Fwoundhealing+roi48_unet-simple_l2-G1.5_20190717_161240_adam_lr0.000128_wd0.0_batch128'

    model_dir = (Path.home()
                 / 'workspace/localization/results/locmax_detection/woundhealing-v2')

    model, data_flow = load_model(model_dir, bn)
    model = model.to(device)

    #%%
    N = len(data_flow.data_indexes)
    for ind in tqdm.trange(N):
        # the original snippet is cut off here; passing the loop index is the likely call
        image, target = data_flow.read_full(ind)
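For context, a minimal sketch of the device selection this script relies on, assuming a plain PyTorch CUDA-or-CPU fallback; the real get_device() in cell_localization.trainer may behave differently.

import torch

def get_device_sketch(cuda_id=0):
    # use the requested GPU when CUDA is available, otherwise fall back to the CPU
    if torch.cuda.is_available():
        return torch.device(f'cuda:{cuda_id}')
    return torch.device('cpu')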
Example #3
def train(
        data_type = 'heba',
        model_name = 'retinanet-resnet34',
        loss_type = 'focal',
        cuda_id = 0,
        log_dir = None,
        batch_size = 16,
        num_workers = 1,
        root_data_dir = None,
        lr = 1e-5,
        **argkws
        ):
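    """Train a RetinaNet bounding-box detector; the per-dataset defaults
    (data directory, ROI size, minimum radius, number of classes) come from data_types_dflts."""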
    
    if log_dir is None:
        log_dir = log_dir_root_dflt / 'bbox_detection' / data_type
    
    
    dflts = data_types_dflts[data_type]
    roi_size = dflts['roi_size']
    min_radius = dflts['min_radius']
    scale_int = dflts['scale_int']
    num_classes = dflts['num_classes']
    
    # only fall back to the dataset's default directory when no explicit path was given
    if root_data_dir is None:
        root_data_dir = dflts['root_data_dir']
    
    train_dir = root_data_dir / 'train'
    test_dir = root_data_dir / 'validation'
    
    bbox_encoder = BoxEncoder(img_size = (roi_size, roi_size),
               pyramid_levels = [1, 2, 3, 4, 5],
               aspect_ratios = [(1.,1.)]
               )
    
    print(root_data_dir)
    train_flow = CoordFlow(train_dir,
                    samples_per_epoch = 20480,
                    bbox_encoder = bbox_encoder,
                    min_radius = min_radius,
                    roi_size = roi_size,
                    scale_int = scale_int
                    )  
    
    val_flow = CoordFlow(test_dir,
                    samples_per_epoch = 640,
                    bbox_encoder = bbox_encoder,
                    min_radius = min_radius,
                    roi_size = roi_size,
                    scale_int = scale_int
                    ) 
    
    if model_name.startswith('retinanet'):
        backbone = model_name.split('-')[1]
        model = RetinaNet(backbone = backbone,
                          is_fpn_small = True,
                          num_classes = num_classes,
                          num_anchors = 3)
    else:
        raise ValueError(f'unknown model_name: {model_name}')
    
    device = get_device(cuda_id)
    
    loss_type = f'{loss_type}-{num_classes}'
    criterion = get_loss(loss_type)
    
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    
    
    date_str = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    save_prefix = f'{data_type}_{model_name}_{loss_type}_{date_str}'
    save_prefix = f'{save_prefix}_adam_lr{lr}_batch{batch_size}'
    
    train_bboxes(save_prefix,
        model,
        device,
        train_flow,
        val_flow,
        criterion,
        optimizer,
        log_dir = log_dir,
        batch_size = batch_size,
        num_workers = num_workers,
        **argkws
        )
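A minimal usage sketch for this training entry point; the argument values are illustrative assumptions, not settings taken from the repository.

if __name__ == '__main__':
    train(data_type='heba',
          model_name='retinanet-resnet34',
          loss_type='focal',
          batch_size=16,
          num_workers=4,
          lr=1e-5)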