Example #1
    # Predictor setup: store the settings, initialize a GPU if none is active
    # yet, load the ELEKTRONN2 model and disable dropout for inference.
    # Assumes the surrounding module imports os and binds the elektronn2
    # config object to `e2config`.
    def __init__(self,
                 model_path,
                 arch='marvin',
                 imposed_batch_size=1,
                 channels_to_load=(0, 1, 2, 3),
                 normal=False,
                 nb_labels=2,
                 normalize_data=False,
                 normalize_func=None,
                 init_gpu=None):
        self.imposed_batch_size = imposed_batch_size
        self.channels_to_load = channels_to_load
        self.arch = arch
        self._path = model_path
        self._fname = os.path.split(model_path)[1]
        self.nb_labels = nb_labels
        self.normal = normal
        self.normalize_data = normalize_data
        self.normalize_func = normalize_func

        if init_gpu is None:
            init_gpu = 'auto'
        if e2config.device is None:
            from elektronn2.utils.gpu import initgpu
            initgpu(init_gpu)
        elektronn2.logger.setLevel("ERROR")
        from elektronn2.neuromancer.model import modelload
        self.model = modelload(model_path,
                               replace_bn='const',
                               imposed_batch_size=imposed_batch_size)
        self.original_do_rates = self.model.dropout_rates
        self.model.dropout_rates = [0.0] * len(self.original_do_rates)
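A minimal follow-up sketch of the same load-and-disable-dropout pattern outside a class; the model path is a placeholder and only calls that already appear in the snippet above are used.

from elektronn2.neuromancer.model import modelload

# Placeholder path; replace_bn and imposed_batch_size mirror the call above.
model = modelload('/path/to/model.mdl',
                  replace_bn='const',
                  imposed_batch_size=1)
# Disable dropout for deterministic inference, as done in the constructor.
model.dropout_rates = [0.0] * len(model.dropout_rates)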
Example #2
import os
from subprocess import check_call, CalledProcessError
import matplotlib

# Pick a matplotlib backend: fall back to the non-interactive AGG backend when
# no X server is available.
with open(os.devnull, 'w') as devnull:
    try:
        # "xset q" will always succeed to run if an X server is currently running
        check_call(['xset', 'q'], stdout=devnull, stderr=devnull)
        print('X available')
        # Don't set backend explicitly, use system default...
    # if "xset q" fails, conclude that X is not running
    except (OSError, CalledProcessError):
        print('X unavailable')
        matplotlib.use('AGG')

from elektronn2.config import config
config.use_manual_cudnn_conv = True
config.use_manual_cudnn_conv_not_w1 = False
config.use_manual_cudnn_pool = True
if config.device is None:
    from elektronn2.utils.gpu import initgpu
    initgpu('auto')

from elektronn2.neuromancer.model import modelload, kernel_lists_from_node_descr

if model_path in [None, 'None']:
    model_path = "~/axon/mkilling/investigation/MA-TEX/CNN-Timings/old_cnn_rec.mdl"

no_gc = False
mfp = False

model_path = os.path.expanduser(model_path)
model_dir, model_name = os.path.split(model_path)
os.chdir(model_dir)
f_name = model_name[:-4] + '-Speed.csv'

# Benchmark Model Compiled with static Shape #
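The listing is cut off at this point. As a rough sketch (not the original benchmark code), the step announced by the header above would load the model with a fixed batch size, reusing only names defined earlier in this example:

# Sketch only: argument values are illustrative, not taken from the source.
model = modelload(model_path,
                  imposed_batch_size=1,
                  override_mfp_to_active=mfp)
print('Benchmarking %s, timings will be written to %s' % (model_name, f_name))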
Example #3
def predict_kzip(kzip_p,
                 m_path,
                 kd_path,
                 clf_thresh=0.5,
                 mfp_active=False,
                 dest_path=None,
                 overwrite=False,
                 gpu_ix=0,
                 imposed_patch_size=None):
    """
    Predicts data contained in k.zip file (defined by bounding box in knossos)

    Parameters
    ----------
    kzip_p : str
        path to kzip containing the raw data cube information
    m_path : str
        path to predictive model
    kd_path : str
        path to knossos dataset
    clf_thresh : float
        classification threshold
    overwrite : bool
        if True, recompute and overwrite an existing prediction file
    mfp_active : bool
        activate max-fragment pooling
    imposed_patch_size : tuple
        patch size (Z, X, Y) imposed on the model
    dest_path : str
        path to destination folder; if None, the folder of the k.zip is used.
    gpu_ix : int
        index of the GPU to use
    """
    cube_name = os.path.splitext(os.path.basename(kzip_p))[0]
    if dest_path is None:
        dest_path = os.path.dirname(kzip_p)
    if not os.path.isfile(dest_path + "/%s_data.h5" % cube_name) or overwrite:
        raw, labels = load_gt_from_kzip(kzip_p,
                                        kd_p=kd_path,
                                        raw_data_offset=0)
        raw = xyz2zxy(raw)
        initgpu(gpu_ix)
        from elektronn2.neuromancer.model import modelload
        m = modelload(
            m_path,
            imposed_patch_size=list(imposed_patch_size) if isinstance(
                imposed_patch_size, tuple) else imposed_patch_size,
            override_mfp_to_active=mfp_active,
            imposed_batch_size=1)
        original_do_rates = m.dropout_rates
        m.dropout_rates = [0.0] * len(original_do_rates)
        pred = m.predict_dense(raw[None, ], pad_raw=True)[1]
        # remove area without sufficient FOV
        pred = zxy2xyz(pred)
        raw = zxy2xyz(raw)
        save_to_h5py([pred, raw], dest_path + "/%s_data.h5" % cube_name,
                     ["pred", "raw"])
    else:
        pred, raw = load_from_h5py(dest_path + "/%s_data.h5" % cube_name,
                                   hdf5_names=["pred", "raw"])
    offset = parse_movement_area_from_zip(kzip_p)[0]
    overlaycubes2kzip(dest_path + "/%s_pred.k.zip" % cube_name,
                      (pred >= clf_thresh).astype(np.uint32), offset, kd_path)
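A hypothetical call of predict_kzip; the paths and patch size below are placeholders, not values from the source:

# Placeholder paths and an illustrative (Z, X, Y) patch size.
predict_kzip('/data/cubes/sample_cube.k.zip',
             m_path='/models/membrane_model.mdl',
             kd_path='/datasets/raw_knossosdataset/',
             clf_thresh=0.5,
             imposed_patch_size=(23, 183, 183),
             gpu_ix=0,
             overwrite=True)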
Example #4
def _pred_dataset(kd_p,
                  kd_pred_p,
                  cd_p,
                  model_p,
                  imposed_patch_size=None,
                  mfp_active=False,
                  gpu_id=0,
                  overwrite=False,
                  i=None,
                  n=None):
    """
    Helper function for dataset prediction. Runs prediction on whole or partial
    knossos dataset. Imposed patch size has to be given in Z, X, Y!

    Parameters
    ----------
    kd_p : str
        path to knossos dataset .conf file
    kd_pred_p : str
        path to the knossos dataset head folder which will contain the prediction
    cd_p : str
        destination folder for chunk dataset containing prediction
    model_p : str
        path to ELEKTRONN2 model
    imposed_patch_size : tuple or None
        patch size (Z, X, Y) of the model
    mfp_active : bool
        activate max-fragment pooling (might be necessary to change patch_size)
    gpu_id : int
        index of the GPU to use
    overwrite : bool
        True: start fresh predictions; False: continue an earlier prediction
    i : int or None
        index of this worker when the chunks are split across n workers
    n : int or None
        total number of workers; if None, the prediction is also exported to
        the knossos dataset

    Returns
    -------

    """

    initgpu(gpu_id)
    from elektronn2.neuromancer.model import modelload
    kd = KnossosDataset()
    kd.initialize_from_knossos_path(kd_p, fixed_mag=1)

    m = modelload(model_p,
                  imposed_patch_size=list(imposed_patch_size) if isinstance(
                      imposed_patch_size, tuple) else imposed_patch_size,
                  override_mfp_to_active=mfp_active,
                  imposed_batch_size=1)
    original_do_rates = m.dropout_rates
    m.dropout_rates = [0.0] * len(original_do_rates)
    offset = m.target_node.shape.offsets
    offset = np.array([offset[1], offset[2], offset[0]], dtype=int)
    cd = ChunkDataset()
    cd.initialize(kd,
                  kd.boundary, [512, 512, 256],
                  cd_p,
                  overlap=offset,
                  box_coords=np.zeros(3),
                  fit_box_size=True)

    ch_dc = cd.chunk_dict
    print('Total number of chunks for GPU/GPUs:', len(ch_dc.keys()))

    if i is not None and n is not None:
        chunks = list(ch_dc.values())[i::n]
    else:
        chunks = list(ch_dc.values())
    print("Starting prediction of %d chunks in gpu %d\n" %
          (len(chunks), gpu_id))

    if not overwrite:
        for chunk in chunks:
            try:
                # skip chunks that already contain a prediction
                _ = chunk.load_chunk("pred")[0]
            except Exception:
                chunk_pred(chunk, m)
    else:
        for chunk in chunks:
            try:
                chunk_pred(chunk, m)
            except KeyboardInterrupt as e:
                print("Exiting out from chunk prediction: ", str(e))
                return
    save_dataset(cd)

    # single gpu processing also exports the cset to kd
    if n is None:
        kd_pred = KnossosDataset()
        kd_pred.initialize_without_conf(kd_pred_p,
                                        kd.boundary,
                                        kd.scale,
                                        kd.experiment_name,
                                        mags=[1, 2, 4, 8])
        cd.export_cset_to_kd(kd_pred,
                             "pred", ["pred"], [4, 4],
                             as_raw=True,
                             stride=[256, 256, 256])
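A hypothetical call of _pred_dataset; the paths and patch size are placeholders. To split the chunks over several GPUs, each worker process would additionally pass its own index i together with the total worker count n:

# Placeholder paths and an illustrative (Z, X, Y) patch size.
_pred_dataset(kd_p='/datasets/raw_kd/knossos.conf',
              kd_pred_p='/datasets/pred_kd/',
              cd_p='/datasets/pred_chunkdataset/',
              model_p='/models/membrane_model.mdl',
              imposed_patch_size=(23, 183, 183),
              gpu_id=0)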
Example #5
def predict_h5(h5_path,
               m_path,
               clf_thresh=None,
               mfp_active=False,
               gpu_ix=0,
               imposed_patch_size=None,
               hdf5_data_key=None,
               data_is_zxy=True,
               dest_p=None,
               dest_hdf5_data_key="pred",
               as_uint8=True):
    """
    Predicts data from an h5 file. Raw data is expected as float32; integer
    data is converted to float32 and rescaled to [0, 1].

    Parameters
    ----------
    h5_path : str
        path to h5 containing the raw data
    m_path : str
        path to predictive model
    clf_thresh : float
        classification threshold, if None, no thresholding
    mfp_active : bool
        activate max-fragment pooling
    imposed_patch_size : tuple
        patch size (Z, X, Y) imposed on the model
    gpu_ix : int
        index of the GPU to use
    hdf5_data_key : str
        if None, the first entry in the list returned by 'load_from_h5py'
        is used
    data_is_zxy : bool
        if False, the data is assumed to be [X, Y, Z] and is transposed
    as_uint8 : bool
        if True, prediction and raw data are stored as uint8 (scaled by 255)
    dest_p : str
        path of the output h5 file; if None, the input path with suffix
        '_pred.h5' is used
    dest_hdf5_data_key : str
        hdf5 key of the prediction in the output file
    """
    if hdf5_data_key:
        raw = load_from_h5py(h5_path, hdf5_names=[hdf5_data_key])[0]
    else:
        raw = load_from_h5py(h5_path, hdf5_names=None)
        assert len(raw) == 1, "'hdf5_data_key' not given but multiple hdf5 " \
                              "elements found. Please define raw data key."
        raw = raw[0]
    if not data_is_zxy:
        raw = xyz2zxy(raw)
    initgpu(gpu_ix)
    if raw.dtype.kind in ('u', 'i'):
        raw = raw.astype(np.float32) / 255.
    from elektronn2.neuromancer.model import modelload
    m = modelload(m_path,
                  imposed_patch_size=list(imposed_patch_size) if isinstance(
                      imposed_patch_size, tuple) else imposed_patch_size,
                  override_mfp_to_active=mfp_active,
                  imposed_batch_size=1)
    original_do_rates = m.dropout_rates
    m.dropout_rates = [0.0] * len(original_do_rates)
    pred = m.predict_dense(raw[None, ], pad_raw=True)[1]
    pred = zxy2xyz(pred)
    raw = zxy2xyz(raw)
    if as_uint8:
        pred = (pred * 255).astype(np.uint8)
        raw = (raw * 255).astype(np.uint8)
    if clf_thresh:
        pred = (pred >= clf_thresh).astype(np.float32)
    if dest_p is None:
        dest_p = h5_path[:-3] + "_pred.h5"
    if hdf5_data_key is None:
        hdf5_data_key = "raw"
    save_to_h5py([raw, pred], dest_p, [hdf5_data_key, dest_hdf5_data_key])
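A hypothetical call of predict_h5; the file paths, hdf5 key and patch size are placeholders:

# Placeholder paths, key and patch size; data_is_zxy=False because the cube is
# assumed to be stored as (X, Y, Z) here.
predict_h5('/data/raw_cube.h5',
           m_path='/models/membrane_model.mdl',
           hdf5_data_key='raw',
           data_is_zxy=False,
           imposed_patch_size=(23, 183, 183),
           gpu_ix=0)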