示例#1
0
def main(cfg):
    """Train a UNet location-prior model and return it.

    Creates the run directory ``<cfg.out_path>/<cfg.exp_name>``, builds an
    augmented training dataloader and an un-augmented init dataloader over
    ``cfg.in_path``, then delegates to ``train``.

    Parameters
    ----------
    cfg : namespace
        Must provide out_path, exp_name, cuda, in_path, locs_dir,
        locs_fname, in_shape, n_ims_test and batch_size.

    Returns
    -------
    The model returned by ``train``.
    """
    run_path = pjoin(cfg.out_path, cfg.exp_name)

    # exist_ok=True is race-free, unlike a separate exists() check
    os.makedirs(run_path, exist_ok=True)

    device = torch.device('cuda' if cfg.cuda else 'cpu')
    model = UNet(out_channels=1, in_channels=3).to(device)

    transf = make_data_aug(cfg)

    # training set gets the augmentations; init set is left un-augmented
    dl_train = LocPriorDataset(cfg.in_path,
                               normalization='rescale',
                               augmentations=transf,
                               locs_dir=cfg.locs_dir,
                               locs_fname=cfg.locs_fname,
                               resize_shape=cfg.in_shape)
    dl_init = LocPriorDataset(cfg.in_path,
                              normalization='rescale',
                              locs_dir=cfg.locs_dir,
                              locs_fname=cfg.locs_fname,
                              resize_shape=cfg.in_shape)

    # evenly-spaced frame indices used for preview images
    frames_tnsr_brd = np.linspace(0,
                                  len(dl_train) - 1,
                                  num=cfg.n_ims_test,
                                  dtype=int)

    dataloader_train = DataLoader(dl_train,
                                  collate_fn=dl_train.collate_fn,
                                  shuffle=True,
                                  drop_last=True,
                                  batch_size=cfg.batch_size)
    dataloader_init = DataLoader(dl_init, collate_fn=dl_init.collate_fn)

    dataloaders = {
        'train': dataloader_train,
        'prev': frames_tnsr_brd,
        'init': dataloader_init
    }

    model = train(cfg, model, device, dataloaders, run_path)

    return model
示例#2
0
    def __init__(self, root_path, *args, **kwargs):
        """Wrap a ``LocPriorDataset`` and precompute derived structures.

        Extra positional/keyword arguments are forwarded verbatim to
        ``LocPriorDataset``.
        """
        self.model_root_path = os.path.expanduser(pjoin('~', '.models'))
        self.root_path = root_path
        self.dl = LocPriorDataset(root_path, *args, **kwargs)

        # build derived data from the wrapped dataset
        self.do_pb()
        self.do_hierarchies()
示例#3
0
def retrain_kmeans(cfg, in_cp_path, cp_path):
    """Re-initialize the clustering head of a Siamese checkpoint.

    Loads weights from ``in_cp_path``, runs k-means on features of the
    training sequence, stores the resulting clusters/transform in the
    model, and saves a new checkpoint named after ``cp_path``.
    """
    import torch
    from torch.utils.data import DataLoader
    from ksptrack.utils.loc_prior_dataset import LocPriorDataset
    from ksptrack.siamese import utils as utls
    from ksptrack.siamese.modeling.siamese import Siamese

    device = torch.device('cuda' if cfg.cuda else 'cpu')
    model = Siamese(embedded_dims=cfg.embedded_dims,
                    cluster_number=cfg.n_clusters,
                    alpha=cfg.alpha,
                    backbone=cfg.backbone).to(device)
    if cfg.clf:
        print('changing output of decoder to 1 channel')
        model.dec.autoencoder.to_predictor()

    print('loading checkpoint {}'.format(in_cp_path))
    # map_location keeps CPU-only machines able to load GPU checkpoints
    state_dict = torch.load(in_cp_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)

    dl = LocPriorDataset(pjoin(cfg.in_root, 'Dataset' + cfg.train_dir),
                         resize_shape=cfg.in_shape,
                         normalization='rescale')
    dl = DataLoader(dl, collate_fn=dl.collate_fn, num_workers=cfg.n_workers)
    # reuse the device built above instead of constructing a second one
    init_clusters, preds, L, feats, labels = train_init_clst.train_kmeans(
        model,
        dl,
        device,
        cfg.n_clusters,
        embedded_dims=cfg.embedded_dims,
        reduc_method=cfg.reduc_method,
        bag_t=cfg.bag_t,
        bag_n_feats=cfg.bag_n_feats,
        bag_max_depth=cfg.bag_max_depth,
        up_thr=cfg.ml_up_thr,
        down_thr=cfg.ml_down_thr)

    L = torch.tensor(L).float().to(device)
    init_clusters = torch.tensor(init_clusters, dtype=torch.float).to(device)

    model.dec.set_clusters(init_clusters)
    model.dec.set_transform(L.T)

    print('saving re-initialized checkpoint {}'.format(cp_path))
    utls.save_checkpoint({
        'epoch': -1,
        'model': model
    },
                         False,
                         fname_cp=os.path.split(cp_path)[-1],
                         path=os.path.split(cp_path)[0])
示例#4
0
    def __init__(self,
                 csv_path,
                 data_path,
                 model_pred_path,
                 loc_prior=False,
                 thr_entrance=0.5,
                 sigma=0.07,
                 sp_labels_fname='sp_labels.npy',
                 in_shape=512,
                 entrance_radius=0.05,
                 cuda=True):
        """Link agent that scores frames with a pretrained UNet predictor.

        Parameters
        ----------
        csv_path : str
            Path to the 2d-locations csv (forwarded to the parent).
        data_path : str
            Root path of the dataset frames.
        model_pred_path : str
            Either a ``.tar`` checkpoint file, or a directory containing
            ``cp_*.pth.tar`` files (the lexicographically last one is used).
        loc_prior : bool
            Stored flag; consumed outside this method.
        thr_entrance : float
            Entrance-probability threshold (also passed to the parent).
        sigma : float
            Stored parameter; consumed outside this method.
        sp_labels_fname : str
            Filename of the superpixel-label array.
        in_shape : int
            Resize shape for dataset frames.
        entrance_radius : float
            Stored radius parameter; consumed outside this method.
        cuda : bool
            Run the predictor on GPU when True.
        """

        super().__init__(csv_path,
                         data_path,
                         thr_entrance=thr_entrance,
                         sp_labels_fname=sp_labels_fname)

        self.entrance_radius = entrance_radius
        self.thr_entrance = thr_entrance
        self.sigma = sigma

        self.device = torch.device('cuda' if cuda else 'cpu')
        self.data_path = data_path

        self.loc_prior = loc_prior
        # single-channel output: the net predicts an objectness map
        self.model_pred = UNet(in_channels=3, out_channels=1)

        # resolve a directory argument to its latest checkpoint file
        if not model_pred_path.endswith('.tar'):
            model_pred_path = sorted(
                glob(pjoin(model_pred_path, 'cp_*.pth.tar')))[-1]
        print('loading checkpoint {}'.format(model_pred_path))
        # map_location keeps CPU-only machines able to load GPU checkpoints
        state_dict = torch.load(model_pred_path,
                                map_location=lambda storage, loc: storage)

        self.model_pred.load_state_dict(state_dict)
        self.model_pred.to(self.device)
        self.model_pred.eval()

        # moves every tensor of a batch dict to the target device,
        # leaving non-tensor values untouched
        self.batch_to_device = lambda batch: {
            k: v.to(self.device) if (isinstance(v, torch.Tensor)) else v
            for k, v in batch.items()
        }

        self.dset = LocPriorDataset(data_path,
                                    normalization='rescale',
                                    resize_shape=in_shape,
                                    sp_labels_fname=sp_labels_fname)

        self.dl = DataLoader(self.dset, collate_fn=self.dset.collate_fn)

        self.prepare_feats()
示例#5
0
    def __init__(self, csv_path, data_path, thr_entrance=0.5):
        """Entrance agent over a ``LocPriorDataset``.

        Parameters
        ----------
        csv_path : str
            Path to the 2d-locations csv file.
        data_path : str
            Root path of the dataset frames.
        thr_entrance : float
            Threshold on entrance probability.
        """

        super().__init__()

        self.dset = LocPriorDataset(data_path,
                                    normalization='rescale',
                                    resize_shape=512)

        # stack per-frame superpixel label maps into one array
        self.labels_ = np.squeeze(np.array([s['labels'] for s in self.dset]))
        # FIX: was `self.labels.shape` — the attribute assigned above is
        # `labels_`, and no `labels` attribute is set in this method
        # (NOTE(review): assumes no parent property named `labels` — the
        # parent class is not visible here; confirm)
        self.shape = self.labels_.shape[1:]
        self.trans_transform = None
        self.thr_clip = 0.001
        self.locs = csv.readCsv(csv_path, as_pandas=True)
        self.thr_entrance = thr_entrance
示例#6
0
import numpy as np  # FIX: `np` was used below without being imported
import matplotlib.pyplot as plt  # FIX: `plt` is used below without being imported
import pickle as pk
from os.path import join as pjoin

from ksptrack.utils.loc_prior_dataset import LocPriorDataset

# Debug script: load KSP results and inspect forward paths that pass
# through a given (frame, superpixel) pair.
res_path = '/home/ubelix/lejeune/runs/ksptrack/Dataset00/exp_lfda'
npf = np.load(pjoin(res_path, 'results.npz'), allow_pickle=True)
paths_for = npf['paths_for']
paths_back = npf['paths_back']

print(paths_for)
# print(paths_back)

root = '/home/ubelix/lejeune/data/medical-labeling/Dataset00'
f0 = 59
f1 = 60
dl = LocPriorDataset(root)

look_for_0 = 209
look_for_1 = 209
# collect paths that contain (frame f0, superpixel look_for_0)
res = []
for p in paths_for:
    if (f0, look_for_0) in p:
        res.append(p)
import pdb
pdb.set_trace()  ## DEBUG ##

s0 = dl[f0]
s1 = dl[f1]

plt.subplot(221)
示例#7
0
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from ksptrack.utils.loc_prior_dataset import LocPriorDataset
from skimage import io
import numpy as np

# Quick visual sanity check of one LocPriorDataset sample.
# dl = Loader('/home/ubelix/lejeune/data/medical-labeling/Dataset10')
dl = LocPriorDataset(root_path='/home/ubelix/lejeune/data/medical-labeling/Dataset00',
                     normalization='std',
                     resize_shape=512,
                     csv_fname='video2.csv')

plt.ion()
sample = dl[40]

# left: unnormalized frame, right: superpixel label map
plt.subplot(121)
plt.imshow(sample['image_unnormal'])
plt.subplot(122)
plt.imshow(np.squeeze(sample['labels']))
plt.show()

import pdb
pdb.set_trace()  ## DEBUG ##
示例#8
0
    def calc_sp_feats(self, cfg):
        """Compute superpixel features with a DeepLabv3+ autoencoder.

        Trains (or forward-propagates) the network over the dataset and
        saves features and weights under ``self.desc_path``.
        """

        # filename templates; '{}' is filled with the feature mode
        df_fname = 'sp_desc_{}.p'
        cp_fname = 'checkpoint_{}.pth.tar'
        bm_fname = 'best_model_{}.pth.tar'

        df_path = os.path.join(self.desc_path, df_fname)
        bm_path = os.path.join(self.desc_path, bm_fname)
        cp_path = os.path.join(self.desc_path, cp_fname)

        from ksptrack.models.deeplab import DeepLabv3Plus

        # training-time augmentations: random affine/noise/flips, then
        # rescaling and ImageNet normalization
        transf = iaa.Sequential([
            iaa.SomeOf(cfg.feat_data_someof, [
                iaa.Affine(
                    scale={
                        "x": (1 - cfg.feat_data_width_shift,
                              1 + cfg.feat_data_width_shift),
                        "y": (1 - cfg.feat_data_height_shift,
                              1 + cfg.feat_data_height_shift)
                    },
                    rotate=(-cfg.feat_data_rot_range, cfg.feat_data_rot_range),
                    shear=(-cfg.feat_data_shear_range,
                           cfg.feat_data_shear_range)),
                iaa.AdditiveGaussianNoise(
                    scale=cfg.feat_data_gaussian_noise_std * 255),
                iaa.Fliplr(p=0.5),
                iaa.Flipud(p=0.5)
            ]), rescale_augmenter,
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        dl = LocPriorDataset(root_path=self.root_path,
                             sig_prior=cfg.feat_locs_gaussian_std,
                             augmentations=transf)
        if (self.feats_mode == 'autoenc'):
            sampler = None
            shuffle = True
        else:
            # oversample each frame so one epoch is ~120 samples
            # NOTE(review): `120 // len(dl)` is 0 when the dataset has
            # more than 120 frames, giving an empty sampler — confirm
            # datasets are always shorter than 120 frames
            idx_refine = np.repeat(np.arange(len(dl)), 120 // len(dl))
            sampler = SubsetRandomSampler(idx_refine)
            shuffle = False

        dataloader = DataLoader(dl,
                                batch_size=cfg.batch_size,
                                shuffle=shuffle,
                                sampler=sampler,
                                collate_fn=dl.collate_fn,
                                drop_last=True,
                                num_workers=cfg.feat_n_workers)
        model = DeepLabv3Plus(pretrained=False)
        train_model(model, cfg, dataloader, cp_path.format('autoenc'),
                    self.desc_path, cp_fname, bm_fname, self.feats_mode)

        # Do forward pass on images and retrieve features
        if (not os.path.exists(df_path.format(self.feats_mode))):
            self.logger.info("Computing features on superpixels")

            # inference: no geometric augmentation, only rescale+normalize
            transf = iaa.Sequential([
                rescale_augmenter,
                Normalize(mean=[0.485, 0.456, 0.406],
                          std=[0.229, 0.224, 0.225])
            ])

            dl.augmentations = transf
            dataloader = DataLoader(dl, batch_size=1, collate_fn=dl.collate_fn)
            model = DeepLabv3Plus()
            feats_sp = get_features(model, cfg, dataloader,
                                    cp_path.format(self.feats_mode),
                                    self.feats_mode)

            # attach one feature vector per superpixel centroid
            feats_df = self.centroids_loc.assign(desc=feats_sp)
            self.logger.info('Saving  features to {}.'.format(
                df_path.format(self.feats_mode)))
            feats_df.to_pickle(os.path.join(df_path.format(self.feats_mode)))
示例#9
0
            'output': x,
            'feats': aspp_feats,
        }


if __name__ == "__main__":
    # Smoke-test: repeatedly forward batches through DeepLabv3Plus.
    in_path = '/home/ubelix/lejeune/data/medical-labeling/Dataset00'
    cuda = False
    device = torch.device('cuda' if cuda else 'cpu')

    model = DeepLabv3Plus()
    model.to(device)

    # rescale to [0, 1] then apply ImageNet normalization
    transf = iaa.Sequential([
        rescale_augmenter,
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    dl = LocPriorDataset(root_path=in_path, augmentations=transf)
    dataloader = DataLoader(dl,
                            batch_size=2,
                            shuffle=True,
                            collate_fn=dl.collate_fn,
                            num_workers=0)

    for epoch in range(10):
        for sample in dataloader:
            im = sample['image'].to(device)
            out = model(im)
示例#10
0
          type=int,
          default=[
              39, 10, 50, 0, 37, 46, 49, 48, 68, 67, 22, 64, 27, 26, 26, 36
          ])
    p.add('--save-path', default='prevs.png')
    cfg = p.parse_args()

    # fixed inputs for this figure
    cfg.csv_fname = 'video1.csv'
    cfg.locs_dir = 'gaze-measurements'
    cfg.coordconv = False

    # one preview image per (frame, dataset) pair: ground-truth contour
    # plus the first 2d annotation drawn as a point
    ims = []
    for fin, dset in zip(cfg.fin, cfg.dsets):

        in_path = pjoin(cfg.root_in_path, 'Dataset' + dset)
        dl = LocPriorDataset(in_path, resize_shape=cfg.shape)
        sample = dl[fin]
        im = draw_gt_contour(sample['image'], sample['label/segmentation'])
        i, j = sample['annotations'].iloc[0].y, sample['annotations'].iloc[0].x
        im = draw_2d_loc(im, i, j)
        ims.append(im)

    # lay the previews out on a 4x4 grid with minimal padding
    n_cols = 4
    n_rows = 4
    fig = plt.figure()
    grid = ImageGrid(fig, 111, nrows_ncols=(n_rows, n_cols), axes_pad=0.02)

    pos = 0
    # NOTE(review): `pos` is never incremented in the visible span, so every
    # image would land in the same grid cell — confirm the increment happens
    # below this excerpt
    for i, im in enumerate(ims):
        grid[pos].imshow(im)
        grid[pos].axis('off')
示例#11
0
def main(cfg):
    """Train an autoencoder backbone (UNet or DeepLabv3+).

    Builds an augmented training dataloader and un-augmented preview
    dataloaders over the training sequence, dumps the configuration to
    ``<run_path>/cfg.yml`` and launches ``train``.
    """
    device = torch.device('cuda' if cfg.cuda else 'cpu')

    if cfg.backbone == 'unet':
        model = UNet(depth=4,
                     skip_mode='none',
                     l2_normalize=True,
                     coordconv=False,
                     dropout_max=0.)
    else:
        model = DeepLabv3Plus()
    model.to(device)

    run_path = pjoin(cfg.out_root, cfg.run_dir)

    # exist_ok=True is race-free, unlike a separate exists() check
    os.makedirs(run_path, exist_ok=True)

    transf, _ = im_utils.make_data_aug(cfg)

    dl = LocPriorDataset(pjoin(cfg.in_root, 'Dataset' + cfg.train_dir),
                         augmentations=transf,
                         normalization='rescale',
                         resize_shape=cfg.in_shape)

    cfg.batch_size = 2
    dataloader_train = DataLoader(dl,
                                  batch_size=cfg.batch_size,
                                  shuffle=True,
                                  collate_fn=dl.collate_fn,
                                  drop_last=True,
                                  num_workers=cfg.n_workers)

    # un-augmented copy of the same sequence, used for previews
    dl_all_prev = LocPriorDataset(pjoin(cfg.in_root,
                                        'Dataset' + cfg.train_dir),
                                  normalization='rescale',
                                  resize_shape=cfg.in_shape)

    # FIX: use the preview dataset's own collate_fn (previously the
    # augmented dataset's bound method was borrowed for all loaders) and
    # size the preview subset from the dataset it actually indexes
    dataloader_all_prev = DataLoader(dl_all_prev,
                                     collate_fn=dl_all_prev.collate_fn)
    dl_prev = Subset(
        dl_all_prev,
        np.linspace(0, len(dl_all_prev) - 1, num=cfg.n_ims_test, dtype=int))
    dataloader_prev = DataLoader(dl_prev, collate_fn=dl_all_prev.collate_fn)

    dataloaders = {
        'train': dataloader_train,
        'all': dataloader_all_prev,
        'prev': dataloader_prev
    }

    # Save cfg
    with open(pjoin(run_path, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)

    optimizer = optim.Adam(
        params=[
            {
                'params': model.parameters(),
                'lr': cfg.lr_autoenc
            },
        ],
        weight_decay=cfg.decay,
    )

    print('run_path: {}'.format(run_path))

    train(cfg, model, dataloaders, run_path, device, optimizer)
示例#12
0
def main(cfg):
    """Render per-frame diagnostic panels for a tracking run.

    For every frame index in ``cfg.fin`` (or all frames when
    ``cfg.do_all``), builds a row of five panels: the frame with
    ground-truth contour and entrance circle, the foreground-probability
    map, its thresholded version, the cluster map, and the entrance
    probabilities.  Optionally saves each row under ``cfg.save_path`` and
    returns either a list of per-frame dicts (``cfg.return_dict``) or one
    large concatenated image.
    """
    locs2d = utls.readCsv(
        os.path.join(cfg.in_path, cfg.locs_dir, cfg.csv_fname))

    # ---------- Descriptors/superpixel costs
    dm = DataManager(cfg.in_path, cfg.precomp_dir)
    dm.calc_superpix(cfg.slic_compactness, cfg.slic_n_sp)

    link_agent, desc_df = make_link_agent(cfg)

    if (cfg.use_siam_pred):
        print('will use DEC/siam objectness probabilities')
        probas = link_agent.obj_preds
        pm_scores_fg = utls.get_pm_array(link_agent.labels, probas)
    else:
        # bagging-based foreground model seeded from entrance superpixels
        pm = utls.calc_pm(desc_df,
                          np.array(link_agent.get_all_entrance_sps(desc_df)),
                          cfg.bag_n_feats, cfg.bag_t, cfg.bag_max_depth,
                          cfg.bag_max_samples, cfg.bag_jobs)
        pm_scores_fg = utls.get_pm_array(link_agent.labels, pm)

    dl = LocPriorDataset(cfg.in_path,
                         resize_shape=512,
                         normalization='rescale',
                         csv_fname=cfg.csv_fname)

    cluster_maps = link_agent.make_cluster_maps()

    if (cfg.do_all):
        cfg.fin = np.arange(len(dl))

    ims = []
    pbar = tqdm.tqdm(total=len(cfg.fin))
    for fin in cfg.fin:

        # 2d annotations for this frame; fall back to blank panels if none
        loc = locs2d[locs2d['frame'] == fin]
        if (loc.shape[0] > 0):
            i_in, j_in = link_agent.get_i_j(loc.iloc[0])

            # probability of linking each superpixel to the entrance one
            entrance_probas = np.zeros(link_agent.labels.shape[1:])
            label_in = link_agent.labels[fin, i_in, j_in]
            for l in np.unique(link_agent.labels[fin]):
                proba = link_agent.get_proba(fin, label_in, fin, l, desc_df)
                entrance_probas[link_agent.labels[fin] == l] = proba

            truth = dl[fin]['label/segmentation'][..., 0]
            truth_ct = segmentation.find_boundaries(truth, mode='thick')
            im1 = dl[fin]['image_unnormal']
            # circle showing the entrance search radius around the location
            rr, cc = draw.circle_perimeter(i_in,
                                           j_in,
                                           int(cfg.norm_neighbor_in *
                                               im1.shape[1]),
                                           shape=im1.shape)

            # red: ground-truth contour; green: entrance circle
            im1[truth_ct, ...] = (255, 0, 0)

            im1[rr, cc, 0] = 0
            im1[rr, cc, 1] = 255
            im1[rr, cc, 2] = 0

            im1 = csv.draw2DPoint(locs2d.to_numpy(), fin, im1, radius=7)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] > cfg.pm_thr).astype(float)))
            ims_.append(cluster_maps[fin])
            ims_.append(colorize(entrance_probas))
            ims.append(ims_)

        else:
            # no annotation: entrance panel is all zeros
            im1 = dl[fin]['image_unnormal']

            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] > cfg.pm_thr).astype(float)))
            ims_.append(cluster_maps[fin])
            ims_.append(colorize(np.zeros_like(pm_scores_fg[fin])))
            ims.append(ims_)

        pbar.update(1)
    pbar.close()

    if (cfg.do_all):
        print('will save all to {}'.format(cfg.save_path))
        if (not os.path.exists(cfg.save_path)):
            os.makedirs(cfg.save_path)
        pbar = tqdm.tqdm(total=len(ims))
        for i, im in enumerate(ims):
            io.imsave(pjoin(cfg.save_path, 'im_{:04d}.png'.format(i)),
                      np.concatenate(im, axis=1))
            pbar.update(1)
        pbar.close()

    if (cfg.return_dict):
        ims_dicts = []
        for ims_ in ims:
            dict_ = {
                'image': ims_[0],
                'pm': ims_[1],
                'pm_thr': ims_[2],
                'clusters': ims_[3],
                'entrance': ims_[4]
            }
            ims_dicts.append(dict_)
        return ims_dicts

    # panels concatenated horizontally per frame, frames stacked vertically
    return np.concatenate([np.concatenate(im, axis=1) for im in ims], axis=0)
示例#13
0
def main(cfg):
    """Render per-frame panels and optional ROC/F1 scores for a run.

    For each frame in ``cfg.fin`` (or every frame when ``cfg.do_all``),
    builds a row of three panels: the frame with ground-truth /
    positive-superpixel contours and entrance circle, the
    foreground-probability map, and its thresholded version.

    Returns
    -------
    dict with keys ``'images'`` (list of per-frame dicts with keys
    ``'image'``, ``'pm'``, ``'pm_thr'``) and ``'scores'`` (empty unless
    ``cfg.do_scores``).
    """
    locs2d = utls.readCsv(
        os.path.join(cfg.in_path, cfg.locs_dir, cfg.locs_fname))

    # ---------- Descriptors/superpixel costs
    spext = SuperpixelExtractor(cfg.in_path,
                                desc_dir=cfg.precomp_dir,
                                compactness=cfg.slic_compactness,
                                n_segments=cfg.slic_n_sp)
    spext.run()

    link_agent, _ = make_link_agent(cfg)

    probas = link_agent.obj_preds
    pm_scores_fg = utls.get_pm_array(link_agent.labels, probas)

    dl = LocPriorDataset(cfg.in_path,
                         normalization='rescale',
                         locs_fname=cfg.locs_fname,
                         sp_labels_fname='sp_labels.npy')

    scores = dict()
    if cfg.do_scores:
        # resize ground-truths to the probability-map resolution
        shape = pm_scores_fg.shape[1:]
        truths = np.array([
            transform.resize(s['label/segmentation'],
                             shape,
                             preserve_range=True).astype(np.uint8) for s in dl
        ])
        fpr, tpr, _ = roc_curve(truths.flatten(), pm_scores_fg.flatten())
        precision, recall, _ = precision_recall_curve(
            truths.flatten(),
            pm_scores_fg.flatten() >= 0.5)
        # index 1 corresponds to the single 0.5-binarized operating point
        precision = precision[1]
        recall = recall[1]
        nom = 2 * (precision * recall)
        denom = (precision + recall)
        # guard against 0/0 when both precision and recall are zero
        if denom > 0:
            f1 = nom / denom
        else:
            f1 = 0.

        auc_ = auc(fpr, tpr)
        scores['f1'] = f1
        scores['auc'] = auc_
        scores['fpr'] = fpr
        scores['tpr'] = tpr

    if (cfg.do_all):
        cfg.fin = np.arange(len(dl))

    ims = []
    pbar = tqdm.tqdm(total=len(cfg.fin))
    for fin in cfg.fin:

        loc = locs2d[locs2d['frame'] == fin]
        if (loc.shape[0] > 0):
            i_in, j_in = link_agent.get_i_j(loc.iloc[0])

            truth = dl[fin]['label/segmentation']
            truth_ct = segmentation.find_boundaries(truth, mode='thick')
            im1 = (255 * dl[fin]['image']).astype(np.uint8)
            # circle showing the entrance search radius around the location
            rr, cc = draw.circle_perimeter(i_in,
                                           j_in,
                                           int(cfg.norm_neighbor_in *
                                               im1.shape[1]),
                                           shape=im1.shape)
            pos_labels = dl[fin]['annotations']

            pos_sps = [
                dl[fin]['labels'].squeeze() == l for l in pos_labels['label']
            ]

            pos_ct = [segmentation.find_boundaries(p) for p in pos_sps]

            # green: positive superpixels and circle; red: ground-truth
            for p in pos_ct:
                im1[p, ...] = (0, 255, 0)

            im1[truth_ct, ...] = (255, 0, 0)

            im1[rr, cc, 0] = 0
            im1[rr, cc, 1] = 255
            im1[rr, cc, 2] = 0

            im1 = csv.draw2DPoint(locs2d.to_numpy(), fin, im1, radius=7)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] >= cfg.pm_thr).astype(float)))
            ims.append(ims_)

        else:

            im1 = (255 * dl[fin]['image']).astype(np.uint8)
            ims_ = []
            ims_.append(im1)
            ims_.append(colorize(pm_scores_fg[fin]))
            ims_.append(
                colorize((pm_scores_fg[fin] >= cfg.pm_thr).astype(float)))
            # FIX: dropped a fourth all-zeros panel that was appended only in
            # this branch; both branches now produce the same 3 panels,
            # matching the 'image'/'pm'/'pm_thr' dict built below
            ims.append(ims_)

        pbar.update(1)
    pbar.close()

    if (cfg.do_all):
        print('will save all to {}'.format(cfg.save_path))
        # exist_ok=True is race-free, unlike a separate exists() check
        os.makedirs(cfg.save_path, exist_ok=True)
        pbar = tqdm.tqdm(total=len(ims))
        for i, im in enumerate(ims):
            io.imsave(pjoin(cfg.save_path, 'im_{:04d}.png'.format(i)),
                      np.concatenate(im, axis=1))
            pbar.update(1)
        pbar.close()

    res = dict()
    ims_dicts = []
    for ims_ in ims:
        dict_ = {
            'image': ims_[0],
            'pm': ims_[1],
            'pm_thr': ims_[2],
        }
        ims_dicts.append(dict_)
    res['images'] = ims_dicts
    res['scores'] = scores
    return res