Example #1
# Module-level imports this snippet relies on; print0 and
# calculate_inference_cost are project helpers defined elsewhere.
import os
from functools import partial
from multiprocessing import Pool

import numpy as np


def shard_val_data(imgs, lbls, meta, orig, gpus, patch_size, overlap):
    # Single GPU: nothing to shard.
    if gpus == 1:
        return imgs, lbls, meta, orig

    rank = int(os.getenv("LOCAL_RANK", "0"))
    intervals = [d * overlap for d in patch_size]
    print0("Balancing evaluation mode...")
    # Estimate the sliding-window inference cost of each validation label in parallel.
    with Pool(processes=8) as pool:
        work = np.array(pool.map(partial(calculate_inference_cost, intervals=intervals), lbls))

    # Sort samples by descending cost so the greedy assignment below stays balanced.
    sort_idx = np.argsort(work)[::-1]
    imgs, lbls = np.array(imgs), np.array(lbls)
    work = work[sort_idx]
    imgs, lbls = imgs[sort_idx], lbls[sort_idx]
    if meta is not None:
        meta, orig = np.array(meta), np.array(orig)
        meta, orig = meta[sort_idx], orig[sort_idx]

    # One independent list per GPU; note that `([[]] * gpus,) * 4` would alias
    # a single shared list and collect every sample into it.
    imgs_balanced = [[] for _ in range(gpus)]
    lbls_balanced = [[] for _ in range(gpus)]
    meta_balanced = [[] for _ in range(gpus)]
    orig_balanced = [[] for _ in range(gpus)]
    curr_work_per_shard = np.zeros((gpus,))

    # Greedily assign each sample to the currently least-loaded shard.
    for w_idx, w in enumerate(work):
        idx = np.argmin(curr_work_per_shard)
        curr_work_per_shard[idx] += w
        imgs_balanced[idx].append(imgs[w_idx])
        lbls_balanced[idx].append(lbls[w_idx])
        if meta is not None:
            meta_balanced[idx].append(meta[w_idx])
            orig_balanced[idx].append(orig[w_idx])

    print0("Done!")
    # Each rank keeps only its own shard.
    return imgs_balanced[rank], lbls_balanced[rank], meta_balanced[rank], orig_balanced[rank]
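The sharding above is a greedy longest-processing-time heuristic: samples are sorted by estimated inference cost, and each one goes to the shard with the smallest accumulated cost. Below is a minimal, self-contained sketch of just that idea on toy numbers; the function name and values are illustrative and not part of the original module.

import numpy as np

def greedy_balance(costs, n_shards):
    order = np.argsort(costs)[::-1]          # heaviest items first
    shards = [[] for _ in range(n_shards)]   # independent list per shard
    load = np.zeros(n_shards)
    for i in order:
        target = int(np.argmin(load))        # currently least-loaded shard
        shards[target].append(i)
        load[target] += costs[i]
    return shards, load

shards, load = greedy_balance(np.array([9.0, 1.0, 7.0, 3.0, 5.0]), 2)
print(shards, load)  # [[0, 3, 1], [2, 4]] [13. 12.]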
Example #2
    def build_nnunet(self):
        # Channel counts, kernel sizes, strides and patch size are computed per task.
        in_channels, out_channels, kernels, strides, self.patch_size = self.get_unet_params()
        self.n_class = out_channels - 1
        if self.args.brats:
            # BraTS predicts three output channels, one per tumour region.
            out_channels = 3

        self.model = DynUNet(
            self.args.dim,
            in_channels,
            out_channels,
            kernels,
            strides,
            strides[1:],  # upsampling kernel sizes mirror the downsampling strides
            filters=self.args.filters,
            norm_name=("INSTANCE", {"affine": True}),
            act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
            deep_supervision=self.args.deep_supervision,
            deep_supr_num=self.args.deep_supr_num,
            res_block=self.args.res_block,
            trans_bias=True,
        )
        print0(f"Filters: {self.model.filters},\nKernels: {kernels}\nStrides: {strides}")
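DynUNet here is MONAI's dynamic U-Net; the positional arguments are spatial dims, input/output channels, per-stage kernel sizes, strides and upsample kernel sizes. The following standalone sketch hard-codes kernels and strides in place of get_unet_params(); the channel counts and input shape are illustrative assumptions rather than values from the original task configuration, and a recent MONAI release is assumed for the trans_bias argument.

import torch
from monai.networks.nets import DynUNet

kernels = [[3, 3, 3]] * 4
strides = [[1, 1, 1], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
model = DynUNet(
    spatial_dims=3,
    in_channels=4,
    out_channels=3,
    kernel_size=kernels,
    strides=strides,
    upsample_kernel_size=strides[1:],  # one upsampling stage per stride-2 block
    norm_name=("INSTANCE", {"affine": True}),
    act_name=("leakyrelu", {"inplace": True, "negative_slope": 0.01}),
    deep_supervision=False,
    res_block=True,
    trans_bias=True,
)
out = model(torch.zeros(1, 4, 64, 64, 64))
print(out.shape)  # torch.Size([1, 3, 64, 64, 64])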
Example #3
    def setup(self, stage=None):
        # Load preprocessed arrays: metadata, original labels, images and labels.
        meta = load_data(self.data_path, "*_meta.npy")
        orig_lbl = load_data(self.data_path, "*_orig_lbl.npy")
        imgs, lbls = load_data(self.data_path, "*_x.npy"), load_data(self.data_path, "*_y.npy")
        self.test_imgs, test_meta = get_test_fnames(self.args, self.data_path, meta)

        if self.args.exec_mode != "predict" or self.args.benchmark:
            # Pick the requested cross-validation fold and split into train/val.
            train_idx, val_idx = list(self.kfold.split(imgs))[self.args.fold]
            orig_lbl, meta = get_split(orig_lbl, val_idx), get_split(meta, val_idx)
            self.kwargs.update({"orig_lbl": orig_lbl, "meta": meta})
            self.train_imgs, self.train_lbls = get_split(imgs, train_idx), get_split(lbls, train_idx)
            self.val_imgs, self.val_lbls = get_split(imgs, val_idx), get_split(lbls, val_idx)
        else:
            # Pure prediction: only test metadata is needed.
            self.kwargs.update({"meta": test_meta})

        print0(f"{len(self.train_imgs)} training, {len(self.val_imgs)} validation, {len(self.test_imgs)} test examples")
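The fold selection relies on self.kfold exposing a split() method that yields one (train_idx, val_idx) index pair per fold, with args.fold choosing one of them. A minimal sketch of just that logic, assuming a scikit-learn KFold (as the split() usage suggests) and using made-up file names and a simplified get_split helper:

import numpy as np
from sklearn.model_selection import KFold

def get_split(data, idx):
    # Index a Python list with a numpy index array.
    return list(np.array(data)[idx])

imgs = [f"case_{i}_x.npy" for i in range(10)]
lbls = [f"case_{i}_y.npy" for i in range(10)]

kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
fold = 0
train_idx, val_idx = list(kfold.split(imgs))[fold]

train_imgs, val_imgs = get_split(imgs, train_idx), get_split(imgs, val_idx)
train_lbls, val_lbls = get_split(lbls, train_idx), get_split(lbls, val_idx)
print(f"{len(train_imgs)} training, {len(val_imgs)} validation examples")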