Code Example #1
def load_model(model_path, **argkws):
    model_path = Path(model_path)

    # The checkpoint's parent directory name encodes the run settings
    # ('..._<model_type>_<loss_type>_...'), so the model and loss type can be
    # recovered from the second and third '_'-separated fields.
    bn = model_path.parent.name
    parts = bn.split('_')
    model_type = parts[1]
    loss_type = parts[2]

    state = torch.load(model_path, map_location='cpu')

    model = get_model(model_type, 1, 1, loss_type, **argkws)
    model.load_state_dict(state['state_dict'])
    model.eval()

    return model, state['epoch']
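A minimal usage sketch (not part of the source): it assumes the load_model above is in scope and that the checkpoint's parent directory follows the '..._<model_type>_<loss_type>_...' naming used by the training script; the path itself is hypothetical.

from pathlib import Path

# Hypothetical checkpoint directory: after splitting the parent name on '_',
# parts[1] == 'unet-simple' (model type) and parts[2] == 'l1smooth-G1.5' (loss type).
ckpt_path = Path('results/woundhealing-v2-mix+Fwoundhealing+roi64_unet-simple_l1smooth-G1.5_20200101_120000/checkpoint.pth.tar')

model, last_epoch = load_model(ckpt_path)
print(f'restored model from epoch {last_epoch}')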
Code Example #2
def load_model(model_path,
               flow_subdir='validation',
               data_root_dir=None,
               flow_type=None,
               data_type=None,
               **argkws
               ):
    model_path = Path(model_path)
    bn = model_path.parent.name
    
    # Parse the run settings back out of the checkpoint's parent directory name,
    # which follows '<data_type>+F<flow_type>+roi<size>_<model_name>_<loss_type>_...'.
    data_type_bn, _, remain = bn.partition('+F')
    flow_type_bn, _, remain = remain.partition('+roi')
    model_name, _, remain = remain.partition('_')[-1].partition('_')

    remain = remain.split('_')
    loss_type = remain[0]

    if data_type is None:
        data_type = data_type_bn
    
    if flow_type is None:
        flow_type = flow_type_bn
    
    flow_args = flow_types[flow_type]
    data_args = data_types[data_type]
    
    if data_root_dir is None:
        data_root_dir = data_args['root_data_dir']
    data_root_dir = Path(data_root_dir)  # ensure a Path so the '/' join below works
    state = torch.load(model_path, map_location='cpu')
    
    n_ch_in = data_args['n_ch_in']
    n_ch_out = data_args['n_ch_out']
    model = get_model(model_name, n_ch_in, n_ch_out, loss_type, **argkws)
    model.load_state_dict(state['state_dict'])
    model.eval()
    
    data_flow = CoordFlow(data_root_dir / flow_subdir,
                          **flow_args,
                          is_preloaded=False)
    
    return model, data_flow, state['epoch']
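A usage sketch for this variant (not part of the source), which also builds the validation CoordFlow; the checkpoint path is hypothetical and 'woundhealing' is assumed to be a registered key in flow_types.

from pathlib import Path

# Hypothetical checkpoint saved under the
# '<data_type>+F<flow_type>+roi<size>_<model_name>_<loss_type>_...' convention.
ckpt_path = Path('results/woundhealing-v2-mix+Fwoundhealing+roi64_unet-simple_l1smooth-G1.5_20200101_120000/checkpoint.pth.tar')

model, val_flow, last_epoch = load_model(ckpt_path, flow_subdir='validation')
print(f'restored at epoch {last_epoch}')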
Code Example #3
def load_model(model_path, **argkws):
    model_path = Path(model_path)
    bn = model_path.parent.name

    # Same directory-name convention as the previous example:
    # '<data_type>+F<flow_type>+roi<size>_<model_name>_<loss_type>_...'.
    data_type, _, remain = bn.partition('+F')
    flow_type, _, remain = remain.partition('+roi')
    model_name, _, remain = remain.partition('_')[-1].partition('_')

    remain = remain.split('_')
    loss_type = remain[0]

    state = torch.load(model_path, map_location='cpu')

    data_args = data_types[data_type]
    n_ch_in = data_args['n_ch_in']
    n_ch_out = data_args['n_ch_out']

    model = get_model(model_name, n_ch_in, n_ch_out, loss_type)
    model.load_state_dict(state['state_dict'])
    model.eval()

    return model, state['epoch']
Code Example #4
File: train_locmax.py Project: ver228/woundhealing
def train(data_type='woundhealing-v2-mix',
          flow_type=None,
          model_name='unet-simple',
          use_classifier=False,
          loss_type='l1smooth-G1.5',
          cuda_id=0,
          log_dir=None,
          batch_size=256,
          n_epochs=2000,
          save_frequency=200,
          num_workers=0,
          root_data_dir=None,
          optimizer_name='adam',
          lr_scheduler_name='',
          lr=1e-5,
          weight_decay=0.0,
          momentum=0.9,
          roi_size=64,
          is_preloaded=False,
          hard_mining_freq=None,
          model_path_init=None,
          train_samples_per_epoch=40960):

    data_args = data_types[data_type]
    dflt_root_data_dir = data_args['root_data_dir']

    n_ch_in = data_args['n_ch_in']
    n_ch_out = data_args['n_ch_out']

    if flow_type is None:
        flow_type = data_args['dflt_flow_type']

    flow_args = flow_types[flow_type]

    if log_dir is None:
        if 'log_prefix' in data_args:
            log_dir = LOG_DIR_DFLT / data_args['log_prefix'] / data_type
        else:
            log_dir = LOG_DIR_DFLT / data_type

    if root_data_dir is None:
        root_data_dir = dflt_root_data_dir
    root_data_dir = Path(root_data_dir)

    train_dir = root_data_dir / 'train'
    test_dir = root_data_dir / 'validation'

    if '-merged' in data_type:
        flow_func = CoordFlowMerged
    else:
        flow_func = CoordFlow

    print(flow_func)
    print(root_data_dir)
    train_flow = flow_func(train_dir,
                           samples_per_epoch=train_samples_per_epoch,
                           roi_size=roi_size,
                           **flow_args,
                           is_preloaded=is_preloaded)

    val_flow = flow_func(test_dir,
                         roi_size=roi_size,
                         **flow_args,
                         is_preloaded=is_preloaded)

    model = get_model(model_name, n_ch_in, n_ch_out, loss_type)

    if model_path_init is not None:
        model_name += '-pretrained'
        state = torch.load(model_path_init, map_location='cpu')
        model.load_state_dict(state['state_dict'])

    device = get_device(cuda_id)

    optimizer = get_optimizer(optimizer_name,
                              model,
                              lr=lr,
                              momentum=momentum,
                              weight_decay=weight_decay)

    lr_scheduler = get_scheduler(lr_scheduler_name, optimizer)

    # Encode the run settings in the save prefix; the load_model helpers above
    # parse the model name and loss type back out of this string.
    date_str = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    hard_mining_str = '' if hard_mining_freq is None else f'+hard-neg-{hard_mining_freq}'
    lr_scheduler_name = '+' + lr_scheduler_name if lr_scheduler_name else ''

    save_prefix = f'{data_type}+F{flow_type}+roi{roi_size}{hard_mining_str}_{model_name}_{loss_type}_{date_str}'
    save_prefix = f'{save_prefix}_{optimizer_name}{lr_scheduler_name}_lr{lr}_wd{weight_decay}_batch{batch_size}'

    train_locmax(save_prefix,
                 model,
                 device,
                 train_flow,
                 val_flow,
                 optimizer,
                 lr_scheduler=lr_scheduler,
                 log_dir=log_dir,
                 batch_size=batch_size,
                 num_workers=num_workers,
                 hard_mining_freq=hard_mining_freq,
                 n_epochs=n_epochs,
                 save_frequency=save_frequency)
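An illustrative call sketch (not part of the source) for the training entry point above; all values are assumptions apart from what is visible in the signature, and 'woundhealing-v2-mix' must be registered in data_types.

# Illustrative only; uses the defaults for flow type, log directory and data root.
train(data_type='woundhealing-v2-mix',
      model_name='unet-simple',
      loss_type='l1smooth-G1.5',
      batch_size=64,
      n_epochs=500,
      save_frequency=100,
      lr=1e-5,
      cuda_id=0)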
Code Example #5
                     scale_int=(0, 4095),
                     prob_unseeded_patch=0.5,
                     zoom_range=(0.97, 1.03),
                     int_aug_offset=(-0.2, 0.2),
                     int_aug_expansion=(0.7, 1.3),
                     samples_per_epoch=batch_size * 100)

    gen = CoordFlow(root_dir, **flow_args)

    loader = DataLoader(gen,
                        batch_size=batch_size,
                        shuffle=True,
                        num_workers=num_workers,
                        collate_fn=collate_simple)

    model = get_model('ind+clf+unet-simple', 3, 2, 'maxlikelihood')

    for images, targets in tqdm.tqdm(loader):
        # Stack the per-sample images into a single batch tensor and move every
        # target field (numpy arrays) onto the device.
        images = torch.from_numpy(np.stack(images)).to(device)
        targets = [{
            k: torch.from_numpy(v).to(device)
            for k, v in target.items()
        } for target in targets]

        #%%
        model.train()
        # In training mode the model returns a dict of named losses; sum them
        # into a single scalar before backpropagating.
        losses = model(images, targets)
        loss = sum([v for v in losses.values()])
        loss.backward()

        #%%
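The cell above only backpropagates; a minimal sketch (assuming the model, loader and device defined above, with Adam as an illustrative optimizer choice) of turning it into a complete optimisation step:

import numpy as np
import torch

# Hypothetical optimizer; without zero_grad()/step() the gradients in the cell
# above simply accumulate across batches.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)

model = model.to(device)  # keep model and batches on the same device
model.train()
for images, targets in loader:
    images = torch.from_numpy(np.stack(images)).to(device)
    targets = [{k: torch.from_numpy(v).to(device) for k, v in t.items()}
               for t in targets]

    optimizer.zero_grad()
    losses = model(images, targets)
    loss = sum(losses.values())
    loss.backward()
    optimizer.step()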
Code Example #6
File: locate_coords.py Project: ver228/woundhealing
    #    model_type = 'unet-simple'

    assert model_path.exists()
    #img_src_dir = Path.home() / 'workspace/localization/data/woundhealing/raw/'
    img_src_dir = Path.home() / 'workspace/localization/data/woundhealing/HitDataJan2020/'
    save_dir = Path.home() / 'workspace/localization/predictions/woundhealing_experiments' / bn

    n_ch_in = 1
    n_ch_out = 1

    device = get_device(cuda_id)
    model = get_model(model_type,
                      n_ch_in,
                      n_ch_out,
                      loss_type,
                      nms_threshold_abs=nms_threshold_abs,
                      nms_threshold_rel=nms_threshold_rel)

    state = torch.load(model_path, map_location='cpu')
    model.load_state_dict(state['state_dict'])
    model.eval()
    model = model.to(device)

    #%%
    # Collect every .tif image, skipping hidden dot-files (e.g. macOS '._' resource forks).
    img_paths = [
        x for x in img_src_dir.rglob('*.tif') if not x.name.startswith('.')
    ]
    for img_path in tqdm.tqdm(img_paths):
        img = cv2.imread(str(img_path), -1)  # -1 == cv2.IMREAD_UNCHANGED: keep the original bit depth