Example #1
0
 def show_ensemble_results(self,
                           files=None,
                           unc=True,
                           unc_metric=None,
                           metric_name='dice_score'):
     """Plot ensemble predictions for each row of `self.df_ens`.

     Shows the input image, the ground-truth mask (when available), the
     argmax segmentation derived from the stored softmax, and optionally
     the uncertainty map.

     Args:
         files: Optional list of file names; restricts plotting to these rows.
         unc: If True, also plot the uncertainty map.
         unc_metric: Passed through to `plot_results`.
         metric_name: Column whose presence in the row indicates a
             ground-truth mask is available (default 'dice_score').
     """
     assert self.df_ens is not None, "Please run `get_ensemble_results` first."
     df = self.df_ens
     if files is not None:
         df = df.reset_index().set_index('file', drop=False).loc[files]
     for _, r in df.iterrows():
         imgs = [_read_img(r.image_path)[:]]
         if metric_name in r.index:
             # Prefer in-memory dataset labels; fall back to reading the
             # mask from disk if the lookup fails for any reason.
             try:
                 msk = self.ds.labels[r.file][:]
             except Exception:  # was a bare `except:`; keep broad fallback
                 msk = _read_msk(r.mask_path,
                                 n_classes=self.n_classes,
                                 instance_labels=self.instance_labels)
             imgs.append(msk)
             hastarget = True
         else:
             hastarget = False
         # Convert the stored softmax into a class-index segmentation map.
         imgs.append(
             np.argmax(zarr.load(r.softmax_path), axis=-1).astype('uint8'))
         if unc: imgs.append(zarr.load(r.uncertainty_path))
         plot_results(*imgs,
                      df=r,
                      hastarget=hastarget,
                      metric_name=metric_name,
                      unc_metric=unc_metric)
Example #2
0
    def export_imagej_rois(self, output_folder='ROI_sets', **kwargs):
        """Write an ImageJ ROI set for every ensemble prediction row."""
        assert self.df_ens is not None, "Please run prediction first."

        out_dir = Path(output_folder)
        out_dir.mkdir(exist_ok=True, parents=True)
        n_rows = len(self.df_ens)
        for _, row in progress_bar(self.df_ens.iterrows(), total=n_rows):
            # Segmentation = argmax over the stored softmax channels.
            softmax = zarr.load(row.softmax_path)
            mask = np.argmax(softmax, axis=-1).astype('uint8')
            uncertainty = zarr.load(row.uncertainty_path)
            export_roi_set(mask,
                           uncertainty,
                           name=row.file,
                           path=out_dir,
                           ascending=False,
                           **kwargs)
Example #3
0
 def show_cellpose_results(self,
                           files=None,
                           unc=True,
                           unc_metric=None,
                           metric_name='mean_average_precision'):
     """Plot Cellpose instance-segmentation results for each ensemble row.

     Shows the input image, the labeled ground-truth components of the
     export class (when the `metric_name` column is present), the Cellpose
     instance mask, and optionally the uncertainty map.
     """
     assert self.df_ens is not None, "Please run `get_ensemble_results` first."
     df = self.df_ens.reset_index()
     if files is not None: df = df.set_index('file', drop=False).loc[files]
     for _, r in df.iterrows():
         imgs = [_read_img(r.image_path)[:]]
         if metric_name in r.index:
             try:
                 # BUG FIX: `idx` was undefined here (the loop unpacks
                 # `_, r`), so the bare except always fell back to disk.
                 # Use the row's file key, as in `show_ensemble_results`.
                 mask = self.ds.labels[r.file][:]
             except Exception:
                 mask = _read_msk(r.mask_path,
                                  n_classes=self.n_classes,
                                  instance_labels=self.instance_labels)
             # Label connected components of the export class for display.
             _, comps = cv2.connectedComponents(
                 (mask == self.cellpose_export_class).astype('uint8'),
                 connectivity=4)
             imgs.append(label2rgb(comps, bg_label=0))
             hastarget = True
         else:
             hastarget = False
         imgs.append(label2rgb(self.cellpose_masks[r['index']], bg_label=0))
         if unc: imgs.append(zarr.load(r.uncertainty_path))
         plot_results(*imgs,
                      df=r,
                      hastarget=hastarget,
                      metric_name=metric_name,
                      unc_metric=unc_metric)
Example #4
0
    def get_cellpose_results(self, export_dir=None):
        """Run Cellpose on the ensemble softmax outputs for the export class.

        Args:
            export_dir: If given, Cellpose masks are also written as TIFFs
                to `<export_dir>/cellpose_masks`.

        Returns:
            List of Cellpose instance masks, one per row of `self.df_ens`
            (also stored on `self.cellpose_masks`).
        """
        assert self.df_ens is not None, "Please run `get_ensemble_results` first."
        cl = self.cellpose_export_class
        assert cl < self.n_classes, f'{cl} not available from {self.n_classes} classes'

        smxs, preds = [], []
        for _, r in self.df_ens.iterrows():
            softmax = zarr.load(r.softmax_path)
            smxs.append(softmax)
            preds.append(np.argmax(softmax, axis=-1).astype('uint8'))

        # Per-pixel probability of the export class and its binary mask.
        probs = [x[..., cl] for x in smxs]
        masks = [x == cl for x in preds]
        cp_masks = run_cellpose(probs,
                                masks,
                                model_type=self.cellpose_model,
                                diameter=self.cellpose_diameter,
                                min_size=self.min_pixel_export,
                                gpu=torch.cuda.is_available())

        if export_dir:
            export_dir = Path(export_dir)
            cp_path = export_dir / 'cellpose_masks'
            cp_path.mkdir(parents=True, exist_ok=True)
            # BUG FIX: index `cp_masks` positionally. The previous code used
            # the DataFrame index from `iterrows()`, which is only correct
            # for a default RangeIndex.
            for pos, (_, r) in enumerate(self.df_ens.iterrows()):
                tifffile.imwrite(cp_path / f'{r.file}_class{cl}.tif',
                                 cp_masks[pos],
                                 compress=6)

        self.cellpose_masks = cp_masks
        return cp_masks
Example #5
0
    def to_pandas(self, start=0, end=None, chunk_size=None, max_chunks=None):
        """Load records from the zarr field into a `pd.DataFrame`.

        Each raw item is passed through `self.parse` and the configured
        `self.dtypes` are applied to the resulting columns.

        NOTE(review): `start or self.start` (and the other `or` defaults)
        treats an explicit 0 as "not provided" and falls back to the
        instance attribute — confirm this is intended.
        """
        start = start or self.start
        end = end or self.end
        chunk_size = chunk_size or self.chunk_size
        max_chunks = max_chunks or self.max_chunks

        if not chunk_size or not max_chunks:  # One shot load, suitable for small zarr files
            df = zarr.load(self.root.joinpath(self.zarr_path).as_posix()).get(
                self.field)
            df = df[start:end]
            df = map(self.parse, df)
        else:  # Chunked load, suitable for large zarr files
            df = []
            with zarr.open(self.root.joinpath(self.zarr_path).as_posix(),
                           "r") as zf:
                # Cap `end` so at most max_chunks * chunk_size items are read.
                end = start + max_chunks * chunk_size if end is None else min(
                    end, start + max_chunks * chunk_size)
                for i_start in range(start, end, chunk_size):
                    # Read one chunk, clipped to `end` on the last iteration.
                    items = zf[self.field][i_start:min(i_start +
                                                       chunk_size, end)]
                    items = map(self.parse, items)
                    df.append(items)
            # Flatten the list of per-chunk iterators lazily.
            df = it.chain(*df)

        df = pd.DataFrame.from_records(df)
        # Apply the configured column dtypes in place.
        for col, col_dtype in self.dtypes.items():
            df[col] = df[col].astype(col_dtype, copy=False)
        return df
Example #6
0
 def show_valid_results(self, model_no=None, files=None, **kwargs):
     """Plot validation predictions, optionally filtered by file or model."""
     if self.df_val is None: self.get_valid_results(**kwargs)
     df = self.df_val
     if files is not None: df = df.set_index('file', drop=False).loc[files]
     if model_no is not None: df = df[df.model_no == model_no]
     for _, row in df.iterrows():
         image = self.ds.get_data(row.img_path)[0][:]
         target = self.ds.get_data(row.img_path, mask=True)[0]
         prediction = zarr.load(row.pred_path)
         std = zarr.load(row.std_path)
         model_label = f'Model {row.model_no}'
         # Without TTA there is no std estimate, so plot zeros instead.
         uncertainty = std if self.tta else np.zeros_like(prediction)
         plot_results(image,
                      target,
                      prediction,
                      uncertainty,
                      df=row,
                      model=model_label)
Example #7
0
 def score_ensemble_results(self, mask_dir=None, label_fn=None):
     """Score ensemble predictions against ground-truth masks using IoU.

     Args:
         mask_dir: Directory (under `self.path`) with ground-truth masks;
             used to derive `label_fn` when none is given.
         label_fn: Maps an image path to its mask path.

     Returns:
         `self.df_ens` with added 'msk_path' and 'iou' columns.
     """
     if not label_fn:
         label_fn = get_label_fn(self.df_ens.img_path[0],
                                 self.path / mask_dir)
     for idx, r in self.df_ens.iterrows():
         # BUG FIX: use the local `label_fn` (derived or passed in) rather
         # than `self.label_fn`, which silently ignored the arguments.
         msk_path = label_fn(r.img_path)
         msk = _read_msk(msk_path)
         self.df_ens.loc[idx, 'msk_path'] = msk_path
         pred = zarr.load(r.pred_path)
         self.df_ens.loc[idx, 'iou'] = iou(msk, pred)
     return self.df_ens
Example #8
0
 def get_mask(self, index):
     """Return the segmentation mask for sample `index` as a SegmentationMask.

     Testing splits carry no annotations, so an all-zero mask is returned
     for them instead of reading from disk.
     """
     imgid = self.ids[index]
     split = 'testing' if is_testing_split(self.split) else 'training'
     info = self.get_img_info(index)
     width, height = info['width'], info['height']
     if split != 'training':
         return SegmentationMask(np.zeros((height, width)), (width, height),
                                 mode='mask')
     zarr_file = os.path.join(self.root, 'object', split,
                              self.shape_prior_base, 'mask_2',
                              imgid + '.zarr')
     binary = zarr.load(zarr_file) != 0
     return SegmentationMask(binary, (width, height), mode='mask')
Example #9
0
 def show_ensemble_results(self,
                           files=None,
                           model_no=None,
                           unc=True,
                           unc_metric=None):
     """Plot ensemble (or single-model) predictions for each result row.

     Args:
         files: Optional list of file names to restrict plotting to.
         model_no: If given, plot results of this model only.
         unc: If True, also plot the uncertainty (std) map.
         unc_metric: Passed through to `plot_results`.
     """
     # BUG FIX: was `assert print(...)`, which always raised a bare
     # AssertionError after printing; assert the condition with a message.
     assert self.df_ens is not None, "Please run `get_ensemble_results` first."
     if model_no is None: df = self.df_ens
     # BUG FIX: `df_models` was missing the `self.` qualifier (NameError).
     else: df = self.df_models[self.df_models.model_no == model_no]
     if files is not None: df = df.set_index('file', drop=False).loc[files]
     for _, r in df.iterrows():
         imgs = [_read_img(r.img_path)[:]]
         # Presence of the 'iou' column indicates a scored ground truth.
         if 'iou' in r.index:
             imgs.append(_read_msk(r.msk_path))
             hastarget = True
         else:
             hastarget = False
         imgs.append(zarr.load(r.pred_path))
         if unc: imgs.append(zarr.load(r.std_path))
         plot_results(*imgs,
                      df=r,
                      hastarget=hastarget,
                      unc_metric=unc_metric)
Example #10
0
 def export_cellpose_rois(self,
                          output_folder='cellpose_ROI_sets',
                          **kwargs):
     """Write an ImageJ ROI set for every Cellpose instance mask.

     Args:
         output_folder: Destination directory (created if missing).
         **kwargs: Forwarded to `export_roi_set`.
     """
     # Guard against calling before predictions exist, consistent with
     # the other export methods.
     assert self.df_ens is not None, "Please run prediction first."
     output_folder = Path(output_folder)
     output_folder.mkdir(exist_ok=True, parents=True)
     # BUG FIX: index `cellpose_masks` positionally. The previous code used
     # the DataFrame index from `iterrows()`, which is only correct for a
     # default RangeIndex.
     for pos, (_, r) in enumerate(progress_bar(self.df_ens.iterrows(),
                                               total=len(self.df_ens))):
         mask = self.cellpose_masks[pos]
         uncertainty = zarr.load(r.uncertainty_path)
         export_roi_set(mask,
                        uncertainty,
                        instance_labels=True,
                        name=r.file,
                        path=output_folder,
                        ascending=False,
                        **kwargs)
Example #11
0
    def compress_dataset(self):
        """
        Load the uncompressed data file, select all samples up to the index
        specified in "startIdx.txt", compress it and export it.
        :return:
        """
        raw_path = self.crazyara_binary_dir + "data_" + self.device_name + ".zarr"
        data = zarr.load(raw_path)

        export_dir, time_stamp = self.create_export_dir()
        nan_detected = compress_zarr_dataset(data,
                                             export_dir + time_stamp + ".zip",
                                             start_idx=0)
        if nan_detected is True:
            # Flag the export directory so corrupted runs are easy to spot.
            logging.error("NaN value detected in file %s.zip" % time_stamp)
            new_export_dir = self.crazyara_binary_dir + time_stamp
            os.rename(export_dir, new_export_dir)
            export_dir = new_export_dir
        self._move_game_data_to_export_dir(export_dir)
Example #12
0
 def score_ensemble_results(self, mask_dir=None, label_fn=None):
     """Score ensemble predictions with the Dice score.

     Ground-truth masks come either from `label_fn` (derived from
     `mask_dir` when not supplied) or from the in-memory dataset labels.

     Returns:
         `self.df_ens` with added 'mask_path' (when applicable) and
         'dice_score' columns.
     """
     if mask_dir is not None and label_fn is None:
         label_fn = get_label_fn(self.df_ens.image_path[0],
                                 self.path / mask_dir)
     for i, r in self.df_ens.iterrows():
         if label_fn is not None:
             # BUG FIX: use the local `label_fn` (derived or passed in)
             # rather than `self.label_fn`, which ignored the arguments.
             msk_path = label_fn(r.image_path)
             msk = _read_msk(msk_path,
                             n_classes=self.n_classes,
                             instance_labels=self.instance_labels)
             self.df_ens.loc[i, 'mask_path'] = msk_path
         else:
             msk = self.ds.labels[r.file][:]
         pred = np.argmax(zarr.load(r.softmax_path),
                          axis=-1).astype('uint8')
         self.df_ens.loc[i, 'dice_score'] = dice_score(msk, pred)
     return self.df_ens
Example #13
0
    def compress_dataset(self, device_name: str):
        """
        Load the uncompressed data file, select all samples up to the index
        specified in "startIdx.txt", compress it and export it.
        :param device_name: The currently active device name (context_device-id)
        :return:
        """
        raw_path = self.binary_dir + "data_" + device_name + ".zarr"
        data = zarr.load(raw_path)

        export_dir, time_stamp = self.create_export_dir(device_name)
        nan_detected = compress_zarr_dataset(data,
                                             export_dir + time_stamp + ".zip",
                                             start_idx=0)
        if nan_detected is True:
            # Flag the export directory so corrupted runs are easy to spot.
            logging.error("NaN value detected in file %s.zip" % time_stamp)
            new_export_dir = self.binary_dir + time_stamp
            os.rename(export_dir, new_export_dir)
            export_dir = new_export_dir
        self.move_game_data_to_export_dir(export_dir, device_name)
Example #14
0
 def get_kins_mask(self, index, len=None):
     """Return the KINS mask for sample `index` as a SegmentationMask.

     NOTE(review): the parameter name `len` shadows the builtin but is kept
     for caller compatibility; it is presumably the number of instances
     used for the all-ones fallback mask — confirm against callers.
     """
     n_instances = len  # alias to avoid shadowing the builtin below
     imgid = self.ids[index]
     split = 'testing' if is_testing_split(self.split) else 'training'
     info = self.get_img_info(index)
     width, height = info['width'], info['height']
     zarr_path = os.path.join(self.root, 'object', split, 'kins_mask_2',
                              imgid + '.zarr')
     if split == 'training' and os.path.exists(zarr_path):
         binary = zarr.load(zarr_path) != 0
         mask = torch.tensor(binary).byte()
     else:
         # No annotation on disk: fall back to an all-ones mask.
         mask = torch.ones((n_instances, height, width)).byte()
     return SegmentationMask(mask, (width, height), mode='mask')
Example #15
0
def init(data_dir=None):
    """Must be called first to specify paths to data.

    Loads the light-curve zarr archive and the metadata YAML into
    module-level globals, then derives the training catalog and valid IDs.

    Args:
        data_dir(str): Path to the directory hosting the training data.
            Defaults to the SciServer data location when omitted.
    """
    # initiate global variables
    global LC_path, LC_zarr, cat_path, train_cat, meta_data, valid_IDs

    # if on sciserver, nothing is passed, then assign path to default
    if data_dir is None:  # `is None`, not `== None` (PEP 8)
        data_dir = (
            "/home/idies/workspace/Temporary/ywx649999311/LSST_AGN/Class_Training/Data/"
        )

    LC_path = os.path.join(data_dir, "LCs.zarr.zip")
    LC_zarr = zarr.load(LC_path)
    cat_path = os.path.join(data_dir, "AllMasters.parquet")
    # Close the file handle deterministically (was a bare `open(...)`).
    with open(os.path.join(data_dir, "meta.yaml")) as f:
        meta_data = yaml.safe_load(f)

    # get train_df and assign to global variable
    train_cat = _get_train_cat()
    valid_IDs = _get_valid_ids()
Example #16
0
def test_zarr(selenium):
    """Smoke-test zarr: array creation, assignment, save/load round-trip,
    and compression with a Blosc codec."""
    import numpy as np
    import zarr
    from numcodecs import Blosc

    # basic test
    zeros = zarr.zeros((1000, 1000), chunks=(100, 100), dtype="i4")
    assert zeros.shape == (1000, 1000)

    # test assignment
    zeros[0, :] = np.arange(1000)
    assert zeros[0, 1] == 1

    # test saving and loading
    saved = np.arange(10)
    zarr.save("/tmp/example.zarr", saved)
    loaded = zarr.load("/tmp/example.zarr")
    np.testing.assert_equal(saved, loaded)

    # test compressor
    codec = Blosc(cname="zstd", clevel=3, shuffle=Blosc.BITSHUFFLE)
    grid = np.arange(10000, dtype="i4").reshape(100, 100)
    compressed = zarr.array(grid, chunks=(10, 10), compressor=codec)
    assert compressed.compressor == codec
 def get_left_img(self, index):
     """Load the left image for `index` from its zarr file as a tensor."""
     path = osp.join(self.leftimgdir, str(index) + '.zarr')
     return torch.from_numpy(zarr.load(path))
Example #18
0
def load_zarr(path):
    """
    Utility to load the file from either dir that pytest might be called from.
    If called from root the path will be different than in the test dir
    """
    full_path = dir_.joinpath(path)
    return zarr.load(str(full_path))
 def get_disparity(self, index):
     """Load the disparity map for `index` from its zarr file as a tensor."""
     path = osp.join(self.disparitydir, str(index) + '.zarr')
     return torch.from_numpy(zarr.load(path))
 def get_right_img(self, index):
     """Load the right image for `index` from its zarr file as a tensor."""
     path = osp.join(self.rightimgdir, str(index) + '.zarr')
     return torch.from_numpy(zarr.load(path))
Example #21
0
def _5(obj : ConstraintFormat) -> np.ndarray : 
    # Registered loader for C in regress: the object stringifies to a zarr path.
    zarr_path = str(obj)
    return zarr.load(zarr_path)
                        help="Output a video, video frames or a plot.")

    return parser.parse_args()


if __name__ == "__main__":

    # Load configuration and the range-doppler maps (HDF5 or zarr,
    # selected by the configured file type).
    args = parse_args()
    config = getConfiguration(args.config)
    xambgfile = config['range_doppler_map_fname']
    if config['range_doppler_map_ftype'] == 'hdf5':
        f = h5py.File(xambgfile, 'r')
        xambg = np.abs(f['/xambg'])
        f.close()
    else:
        xambg = np.abs(zarr.load(xambgfile))

    print("Loaded range-doppler maps.")
    # NOTE(review): assumes frames are stacked along the last axis — confirm.
    Nframes = xambg.shape[2]
    print("Applying CFAR filter...")
    # CFAR filter each frame using a 2D kernel
    CF = np.zeros(xambg.shape)
    for i in tqdm(range(Nframes)):
        CF[:, :, i] = CFAR_2D(xambg[:, :, i], 18, 4)

    print("Applying Kalman Filter...")
    # Track a single target through the CFAR-filtered frames.
    history = simple_target_tracker(CF, config['max_range_actual'],
                                    config['max_doppler_actual'])

    estimate = history['estimate']
    measurement = history['measurement']
Example #23
0
 def read_zarr():
     """Read 'test.zarr' from disk; the loaded array is intentionally unused."""
     zarr.load('test.zarr')
Example #24
0
    def ensemble_results(self,
                         files,
                         path=None,
                         export_dir=None,
                         filetype='.png',
                         use_tta=None,
                         **kwargs):
        """Merge per-model predictions into ensemble results for `files`.

        For every file, the per-model softmax, std, and energy maps are
        merged via TTA mergers, stored in a zarr group, and summarized in
        a result row pointing at the stored arrays.

        Args:
            files: Iterable of input file paths (objects with `.name`).
            path: Optional directory for the zarr store; a temporary store
                is used when omitted.
            export_dir: If given, masks (and uncertainties when TTA is on)
                are also saved as image files there.
            filetype: Image extension for exported files.
            use_tta: Override for `self.pred_tta`.

        Returns:
            `pd.DataFrame` with one result row per file.
        """
        use_tta = use_tta or self.pred_tta
        if export_dir:
            export_dir = Path(export_dir)
            pred_path = export_dir / 'masks'
            pred_path.mkdir(parents=True, exist_ok=True)
            if use_tta:
                unc_path = export_dir / 'uncertainties'
                unc_path.mkdir(parents=True, exist_ok=True)

        store = str(path / 'ensemble') if path else zarr.storage.TempStore()
        root = zarr.group(store=store, overwrite=True)
        chunk_store = root.chunk_store.path
        g_smx, g_seg, g_std, g_eng = root.create_groups(
            'ens_smx', 'ens_seg', 'ens_std', 'ens_energy')
        res_list = []
        for f in files:
            df_fil = self.df_models[self.df_models.file == f.name]
            # BUG FIX (message): was "Predictions and models to not match."
            assert len(df_fil) == len(
                self.models), "Predictions and models do not match."
            m_smx, m_std, m_eng = tta.Merger(), tta.Merger(), tta.Merger()
            for idx, r in df_fil.iterrows():
                m_smx.append(zarr.load(r.smx_path))
                m_std.append(zarr.load(r.std_path))
                m_eng.append(zarr.load(r.eng_path))
            smx = m_smx.result().numpy()
            g_smx[f.name] = smx
            # Segmentation = argmax over the merged softmax channels.
            g_seg[f.name] = np.argmax(smx, axis=-1)
            g_std[f.name] = m_std.result().numpy()
            eng = m_eng.result()
            g_eng[f.name] = eng.numpy()
            m_eng_max = energy_max(eng, ks=self.energy_ks).numpy()
            # Result row referencing the arrays stored in the zarr groups.
            df_tmp = pd.Series({
                'file': f.name,
                'model': f'{self.arch}_ensemble',
                'energy_max': m_eng_max,
                'img_path': f,
                'pred_path': f'{chunk_store}/{g_seg.path}/{f.name}',
                'smx_path': f'{chunk_store}/{g_smx.path}/{f.name}',
                'std_path': f'{chunk_store}/{g_std.path}/{f.name}',
                'eng_path': f'{chunk_store}/{g_eng.path}/{f.name}'
            })
            res_list.append(df_tmp)
            if export_dir:
                save_mask(g_seg[f.name][:],
                          pred_path / f'{df_tmp.file}_{df_tmp.model}_mask',
                          filetype)
                if use_tta:
                    save_unc(g_std[f.name][:],
                             unc_path / f'{df_tmp.file}_{df_tmp.model}_unc',
                             filetype)
        return pd.DataFrame(res_list)