Example #1
    def __init__(self,
                 model_path,
                 arch='marvin',
                 imposed_batch_size=1,
                 channels_to_load=(0, 1, 2, 3),
                 normal=False,
                 nb_labels=2,
                 normalize_data=False,
                 normalize_func=None,
                 init_gpu=None):
        self.imposed_batch_size = imposed_batch_size
        self.channels_to_load = channels_to_load
        self.arch = arch
        self._path = model_path
        self._fname = os.path.split(model_path)[1]
        self.nb_labels = nb_labels
        self.normal = normal
        self.normalize_data = normalize_data
        self.normalize_func = normalize_func

        if init_gpu is None:
            init_gpu = 'auto'
        # e2config refers to elektronn2's config module (import not shown in this
        # excerpt); the GPU is only initialised if no device has been set up yet.
        if e2config.device is None:
            from elektronn2.utils.gpu import initgpu
            initgpu(init_gpu)
        elektronn2.logger.setLevel("ERROR")
        from elektronn2.neuromancer.model import modelload
        self.model = modelload(model_path,
                               replace_bn='const',
                               imposed_batch_size=imposed_batch_size)
        # disable dropout for deterministic inference
        self.original_do_rates = self.model.dropout_rates
        self.model.dropout_rates = [0.0] * len(self.original_do_rates)
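The constructor above comes from a model-wrapper class whose surrounding definition is not part of this excerpt. A minimal usage sketch, assuming a hypothetical wrapper name CNNPredictor that contains exactly this __init__ (the class name and model path are placeholders):

# CNNPredictor is a placeholder name; only the __init__ above is from the source.
predictor = CNNPredictor('/path/to/model.mdl',
                         imposed_batch_size=1,
                         nb_labels=2,
                         init_gpu='auto')
# After construction, all dropout rates of the loaded model are set to 0.0,
# i.e. predictions are deterministic.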
Example #2
def to_knossos_dataset(kd_p,
                       kd_pred_p,
                       cd_p,
                       model_p,
                       imposed_patch_size,
                       mfp_active=False):
    """

    Parameters
    ----------
    kd_p : str
    kd_pred_p : str
    cd_p : str
    model_p :
    imposed_patch_size :
    mfp_active :

    Returns
    -------

    """
    from elektronn2.neuromancer.model import modelload

    kd = KnossosDataset()
    kd.initialize_from_knossos_path(kd_p, fixed_mag=1)
    kd_pred = KnossosDataset()
    m = modelload(model_p,
                  imposed_patch_size=list(imposed_patch_size) if isinstance(
                      imposed_patch_size, tuple) else imposed_patch_size,
                  override_mfp_to_active=mfp_active,
                  imposed_batch_size=1)
    original_do_rates = m.dropout_rates
    m.dropout_rates = [0.0] * len(original_do_rates)
    offset = m.target_node.shape.offsets
    # reorder the (Z, X, Y) model offsets to (X, Y, Z)
    offset = np.array([offset[1], offset[2], offset[0]], dtype=int)
    cd = ChunkDataset()
    cd.initialize(kd,
                  kd.boundary, [512, 512, 256],
                  cd_p,
                  overlap=offset,
                  box_coords=np.zeros(3),
                  fit_box_size=True)
    kd_pred.initialize_without_conf(kd_pred_p,
                                    kd.boundary,
                                    kd.scale,
                                    kd.experiment_name,
                                    mags=[1, 2, 4, 8])
    cd.export_cset_to_kd(kd_pred,
                         "pred", ["pred"], [4, 4],
                         as_raw=True,
                         stride=[256, 256, 256])
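A minimal call sketch with placeholder paths, assuming the chunk dataset under cd_p already holds 'pred' data (for instance produced by _pred_dataset further below) and that the model expects (Z, X, Y) patches:

# All paths and the patch size below are placeholders.
to_knossos_dataset(kd_p='/data/knossos/raw/knossos.conf',
                   kd_pred_p='/data/knossos/pred/',
                   cd_p='/data/chunkdatasets/pred/',
                   model_p='/models/barrier.mdl',
                   imposed_patch_size=(25, 171, 171),  # (Z, X, Y)
                   mfp_active=False)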
Example #3
def prediction_helper(raw, model, override_mfp=True, imposed_patch_size=None):
    """
    Helper function for predicting raw volumes (range: 0 to 255; uint8).
    Will change X, Y, Z to ELEKTRONN format (Z, X, Y) and returns prediction
    in standard format [X, Y, Z]. Imposed patch size has to be given in Z, X, Y!

    Parameters
    ----------
    raw : np.array
        volume [X, Y, Z]
    model : str or model object
        path to model (.mdl)
    override_mfp : bool
    imposed_patch_size : tuple
        in Z, X, Y FORMAT!

    Returns
    -------
    np.array
        prediction data [X, Y, Z]
    """
    if isinstance(model, str):
        from elektronn2.neuromancer.model import modelload
        m = modelload(
            model,
            imposed_patch_size=list(imposed_patch_size) if isinstance(
                imposed_patch_size, tuple) else imposed_patch_size,
            override_mfp_to_active=override_mfp,
            imposed_batch_size=1)
        original_do_rates = m.dropout_rates
        m.dropout_rates = [0.0] * len(original_do_rates)
    else:
        m = model
    raw = xyz2zxy(raw)
    if raw.dtype.kind in ('u', 'i'):
        # convert integer data to float32 and scale it to [0, 1]
        raw = raw.astype(np.float32) / 255.
    if raw.dtype != np.float32:
        # assume float data is already normalized between 0 and 1
        raw = raw.astype(np.float32)
    assert 0 <= np.max(raw) <= 1.0 and 0 <= np.min(raw) <= 1.0
    pred = m.predict_dense(raw[None, ], pad_raw=True)[1]
    return zxy2xyz(pred)
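A minimal usage sketch with a random uint8 volume; the model path is a placeholder and the patch size is an assumed (Z, X, Y) example:

import numpy as np

# Random [X, Y, Z] uint8 stand-in for real raw data.
raw = (np.random.rand(256, 256, 128) * 255).astype(np.uint8)
pred = prediction_helper(raw, '/path/to/model.mdl',
                         imposed_patch_size=(25, 171, 171))  # (Z, X, Y)
print(pred.shape)  # prediction in [X, Y, Z]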
Example #4
if model_path in [None, 'None']:
    model_path = "~/axon/mkilling/investigation/MA-TEX/CNN-Timings/old_cnn_rec.mdl"

no_gc = False
mfp = False

model_path = os.path.expanduser(model_path)
model_dir, model_name = os.path.split(model_path)
os.chdir(model_dir)
f_name = model_name[:-4] + '-Speed.csv'

# Benchmark model compiled with a static shape #
# (in_sh, the (b, f, z, x, y) input shape, and modelload are defined earlier in the script)
if in_sh:
    model_static = modelload(model_path,
                             override_mfp_to_active=mfp,
                             imposed_patch_size=in_sh[2:],
                             imposed_batch_size=1,
                             make_weights_constant=True)
    try:
        val = np.random.rand(*in_sh).astype(np.float32)
        model_static.predict(val)
        t0 = time.time()
        for i in range(3):
            y = model_static.predict(val)

        t1 = time.time()
        n = np.prod(y.shape[2:])                  # output voxels per run
        speed = float(n) / (t1 - t0) / 1e6 * 3    # throughput in mega-voxels per second
        if len(in_sh) == 5:
            z = in_sh[2]
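The speed value above is a throughput estimate: output voxels per run, times the three timed runs, divided by the elapsed time, in mega-voxels per second. A compact, self-contained version of the same timing idiom (the input shape is an assumed example):

import time
import numpy as np

in_sh = (1, 1, 25, 171, 171)                 # assumed (b, f, z, x, y) input shape
val = np.random.rand(*in_sh).astype(np.float32)
model_static.predict(val)                    # warm-up / compilation run
t0 = time.time()
for _ in range(3):
    y = model_static.predict(val)
t1 = time.time()
speed = 3.0 * np.prod(y.shape[2:]) / (t1 - t0) / 1e6   # mega-voxels per second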
Example #5
def predict_kzip(kzip_p,
                 m_path,
                 kd_path,
                 clf_thresh=0.5,
                 mfp_active=False,
                 dest_path=None,
                 overwrite=False,
                 gpu_ix=0,
                 imposed_patch_size=None):
    """
    Predicts data contained in k.zip file (defined by bounding box in knossos)

    Parameters
    ----------
    kzip_p : str
        path to kzip containing the raw data cube information
    m_path : str
        path to predictive model
    kd_path : str
        path to knossos dataset
    clf_thresh : float
        classification threshold
    overwrite : bool
    mfp_active : bool
    imposed_patch_size : tuple
        patch size (Z, X, Y) of the model
    dest_path : str
        path to destination folder, if None folder of k.zip is used.
    gpu_ix : int
    """
    cube_name = os.path.splitext(os.path.basename(kzip_p))[0]
    if dest_path is None:
        dest_path = os.path.dirname(kzip_p)
    if not os.path.isfile(dest_path + "/%s_data.h5" % cube_name) or overwrite:
        raw, labels = load_gt_from_kzip(kzip_p,
                                        kd_p=kd_path,
                                        raw_data_offset=0)
        raw = xyz2zxy(raw)
        initgpu(gpu_ix)
        from elektronn2.neuromancer.model import modelload
        m = modelload(
            m_path,
            imposed_patch_size=list(imposed_patch_size) if isinstance(
                imposed_patch_size, tuple) else imposed_patch_size,
            override_mfp_to_active=mfp_active,
            imposed_batch_size=1)
        original_do_rates = m.dropout_rates
        m.dropout_rates = [0.0] * len(original_do_rates)
        pred = m.predict_dense(raw[None, ], pad_raw=True)[1]
        # remove area without sufficient FOV
        pred = zxy2xyz(pred)
        raw = zxy2xyz(raw)
        save_to_h5py([pred, raw], dest_path + "/%s_data.h5" % cube_name,
                     ["pred", "raw"])
    else:
        pred, raw = load_from_h5py(dest_path + "/%s_data.h5" % cube_name,
                                   hdf5_names=["pred", "raw"])
    offset = parse_movement_area_from_zip(kzip_p)[0]
    overlaycubes2kzip(dest_path + "/%s_pred.k.zip" % cube_name,
                      (pred >= clf_thresh).astype(np.uint32), offset, kd_path)
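A minimal call sketch with placeholder paths; the patch size is an assumed (Z, X, Y) example:

predict_kzip('/data/cubes/example_cube.k.zip',
             m_path='/models/barrier.mdl',
             kd_path='/data/knossos/raw/knossos.conf',
             clf_thresh=0.5,
             imposed_patch_size=(25, 171, 171),  # (Z, X, Y)
             gpu_ix=0)
# Writes example_cube_data.h5 and example_cube_pred.k.zip next to the input k.zip.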
Example #6
def _pred_dataset(kd_p,
                  kd_pred_p,
                  cd_p,
                  model_p,
                  imposed_patch_size=None,
                  mfp_active=False,
                  gpu_id=0,
                  overwrite=False,
                  i=None,
                  n=None):
    """
    Helper function for dataset prediction. Runs prediction on whole or partial
    knossos dataset. Imposed patch size has to be given in Z, X, Y!

    Parameters
    ----------
    kd_p : str
        path to knossos dataset .conf file
    kd_pred_p : str
        path to the knossos dataset head folder which will contain the prediction
    cd_p : str
        destination folder for chunk dataset containing prediction
    model_p : str
        path to the ELEKTRONN2 model
    imposed_patch_size : tuple or None
        patch size (Z, X, Y) of the model
    mfp_active : bool
        activate max-fragment pooling (might be necessary to change patch_size)
    gpu_id : int
        the GPU used
    overwrite : bool
        True: fresh predictions; False: earlier prediction continues
    i : int or None
        index of this worker when the chunks are split across multiple processes
    n : int or None
        total number of parallel workers; if None (single process), the chunk
        dataset is also exported to the prediction knossos dataset
    """

    initgpu(gpu_id)
    from elektronn2.neuromancer.model import modelload
    kd = KnossosDataset()
    kd.initialize_from_knossos_path(kd_p, fixed_mag=1)

    m = modelload(model_p,
                  imposed_patch_size=list(imposed_patch_size) if isinstance(
                      imposed_patch_size, tuple) else imposed_patch_size,
                  override_mfp_to_active=mfp_active,
                  imposed_batch_size=1)
    original_do_rates = m.dropout_rates
    m.dropout_rates = [0.0] * len(original_do_rates)
    offset = m.target_node.shape.offsets
    # reorder the (Z, X, Y) model offsets to (X, Y, Z)
    offset = np.array([offset[1], offset[2], offset[0]], dtype=int)
    cd = ChunkDataset()
    cd.initialize(kd,
                  kd.boundary, [512, 512, 256],
                  cd_p,
                  overlap=offset,
                  box_coords=np.zeros(3),
                  fit_box_size=True)

    ch_dc = cd.chunk_dict
    print('Total number of chunks for GPU/GPUs:', len(ch_dc.keys()))

    if i is not None and n is not None:
        chunks = list(ch_dc.values())[i::n]
    else:
        chunks = list(ch_dc.values())
    print("Starting prediction of %d chunks in gpu %d\n" %
          (len(chunks), gpu_id))

    if not overwrite:
        for chunk in chunks:
            try:
                _ = chunk.load_chunk("pred")[0]
            except Exception as e:
                chunk_pred(chunk, m)
    else:
        for chunk in chunks:
            try:
                chunk_pred(chunk, m)
            except KeyboardInterrupt as e:
                print("Exiting out from chunk prediction: ", str(e))
                return
    save_dataset(cd)

    # single gpu processing also exports the cset to kd
    if n is None:
        kd_pred = KnossosDataset()
        kd_pred.initialize_without_conf(kd_pred_p,
                                        kd.boundary,
                                        kd.scale,
                                        kd.experiment_name,
                                        mags=[1, 2, 4, 8])
        cd.export_cset_to_kd(kd_pred,
                             "pred", ["pred"], [4, 4],
                             as_raw=True,
                             stride=[256, 256, 256])
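A minimal call sketch with placeholder paths. With i and n the chunk list is split across several workers (each worker predicts every n-th chunk); a single call with n=None also exports the result to the prediction knossos dataset:

# Single-GPU run over the whole dataset (placeholder paths).
_pred_dataset(kd_p='/data/knossos/raw/knossos.conf',
              kd_pred_p='/data/knossos/pred/',
              cd_p='/data/chunkdatasets/pred/',
              model_p='/models/barrier.mdl',
              imposed_patch_size=(25, 171, 171),  # (Z, X, Y)
              gpu_id=0)

# Worker 0 of 4 in a multi-process setup (the export step is skipped when n is set).
_pred_dataset('/data/knossos/raw/knossos.conf', '/data/knossos/pred/',
              '/data/chunkdatasets/pred/', '/models/barrier.mdl',
              imposed_patch_size=(25, 171, 171), gpu_id=0, i=0, n=4)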
Example #7
def predict_h5(h5_path,
               m_path,
               clf_thresh=None,
               mfp_active=False,
               gpu_ix=0,
               imposed_patch_size=None,
               hdf5_data_key=None,
               data_is_zxy=True,
               dest_p=None,
               dest_hdf5_data_key="pred",
               as_uint8=True):
    """
    Predicts data from h5 file. Assumes raw data is already float32.

    Parameters
    ----------
    h5_path : str
        path to h5 containing the raw data
    m_path : str
        path to predictive model
    clf_thresh : float
        classification threshold, if None, no thresholding
    mfp_active : False
    imposed_patch_size : tuple
    gpu_ix : int
    hdf5_data_key: str
        if None, it uses the first entry in the list returned by
        'load_from_h5py'
    data_is_zxy : bool
        if False, it will assumes data is [X, Y, Z]
    as_uint8: bool
    dest_p : str
    dest_hdf5_data_key : str
    """
    if hdf5_data_key:
        raw = load_from_h5py(h5_path, hdf5_names=[hdf5_data_key])[0]
    else:
        raw = load_from_h5py(h5_path, hdf5_names=None)
        assert len(raw) == 1, "'hdf5_data_key' not given but multiple hdf5 " \
                              "elements found. Please define raw data key."
        raw = raw[0]
    if not data_is_zxy:
        raw = xyz2zxy(raw)
    initgpu(gpu_ix)
    if raw.dtype.kind in ('u', 'i'):
        raw = raw.astype(np.float32) / 255.
    from elektronn2.neuromancer.model import modelload
    m = modelload(m_path,
                  imposed_patch_size=list(imposed_patch_size) if isinstance(
                      imposed_patch_size, tuple) else imposed_patch_size,
                  override_mfp_to_active=mfp_active,
                  imposed_batch_size=1)
    original_do_rates = m.dropout_rates
    m.dropout_rates = [0.0] * len(original_do_rates)
    pred = m.predict_dense(raw[None, ], pad_raw=True)[1]
    pred = zxy2xyz(pred)
    raw = zxy2xyz(raw)
    if as_uint8:
        pred = (pred * 255).astype(np.uint8)
        raw = (raw * 255).astype(np.uint8)
    if clf_thresh:
        # note: thresholding is applied after the optional uint8 scaling, so with
        # as_uint8=True the threshold refers to the 0-255 value range
        pred = (pred >= clf_thresh).astype(np.float32)
    if dest_p is None:
        dest_p = h5_path[:-3] + "_pred.h5"
    if hdf5_data_key is None:
        hdf5_data_key = "raw"
    save_to_h5py([raw, pred], dest_p, [hdf5_data_key, dest_hdf5_data_key])
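A minimal call sketch with placeholder paths, assuming the raw cube is stored in (Z, X, Y) order under the hypothetical dataset name 'raw':

predict_h5('/data/cubes/example_cube.h5',
           m_path='/models/barrier.mdl',
           hdf5_data_key='raw',                # hypothetical dataset name
           imposed_patch_size=(25, 171, 171),  # (Z, X, Y)
           gpu_ix=0,
           as_uint8=True)
# Writes /data/cubes/example_cube_pred.h5 with datasets 'raw' and 'pred'.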
Example #8
def demo_new_gm():
    mfp = False
    in_sh = (1, 1, 15, 198, 198) if mfp else (1, 1, 25, 171, 171)
    inp = neuromancer.Input(in_sh, 'b,f,z,x,y', name='raw')

    out = neuromancer.Conv(inp,
                           20, (1, 6, 6), (1, 1, 1),
                           mfp=mfp,
                           batch_normalisation='train')
    out = neuromancer.Conv(out, 40, (1, 5, 5), (1, 2, 2), mfp=mfp)
    out = neuromancer.Conv(out, 50, (1, 4, 4), (1, 2, 2), mfp=mfp)
    out = neuromancer.Conv(out, 80, (1, 4, 4), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 80, (4, 1, 1), (2, 1, 1),
                           mfp=mfp)  # first z kernel, 2 pool
    out = neuromancer.Conv(out, 80, (3, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 80, (3, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 100, (2, 4, 4), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 120, (2, 4, 4), (1, 1, 1), mfp=mfp)
    out = neuromancer.Conv(out, 120, (1, 2, 2), (1, 1, 1), mfp=mfp)

    out = neuromancer.Conv(out, 120, (1, 1, 1), (1, 1, 1), mfp=mfp)
    out1, out2 = neuromancer.split(out, 1, n_out=2)

    probs = neuromancer.Conv(out1,
                             2, (1, 1, 1), (1, 1, 1),
                             mfp=mfp,
                             activation_func='lin')
    probs = neuromancer.Softmax(probs, name='probs')
    discard, mode = neuromancer.split(probs, 1, n_out=2)

    concentration = neuromancer.Conv(out2,
                                     1, (1, 1, 1), (1, 1, 1),
                                     mfp=mfp,
                                     activation_func='lin',
                                     name='concentration')
    t_sh = probs.shape.copy()
    t_sh.updateshape('f', 1)
    target = neuromancer.Input_like(t_sh, dtype='float32', name='target')

    loss_pix = neuromancer.BetaNLL(mode, concentration, target)
    loss = neuromancer.AggregateLoss(loss_pix)
    errors = neuromancer.Errors(probs, target, target_is_sparse=True)
    prediction = neuromancer.Concat([mode, concentration],
                                    axis=1,
                                    name='prediction')

    # T refers to theano.tensor (elektronn2 is Theano-based; import not shown here)
    loss_std = neuromancer.ApplyFunc(loss_pix, T.std)

    model = neuromancer.model_manager.getmodel()
    model.designate_nodes(input_node=inp,
                          target_node=target,
                          loss_node=loss,
                          prediction_node=prediction,
                          prediction_ext=[loss, errors, prediction])

    ### --- ###

    model2 = neuromancer.model_manager.newmodel("second")
    inp2 = neuromancer.Input(in_sh, 'b,f,z,x,y', name='raw')

    out2 = neuromancer.Conv(inp2, 20, (1, 6, 6), (1, 1, 1), mfp=mfp)
    out2 = neuromancer.Conv(out2, 40, (1, 5, 5), (1, 2, 2), mfp=mfp)
    out2 = neuromancer.Conv(out2, 50, (1, 4, 4), (1, 2, 2), mfp=mfp)

    out2 = neuromancer.Conv(out2, 120, (2, 4, 4), (1, 1, 1), mfp=mfp)
    out2 = neuromancer.Conv(out2, 120, (1, 2, 2), (1, 1, 1), mfp=mfp)

    out2 = neuromancer.Conv(out2, 120, (1, 1, 1), (1, 1, 1), mfp=mfp)

    probs2 = neuromancer.Conv(out2,
                              2, (1, 1, 1), (1, 1, 1),
                              mfp=mfp,
                              activation_func='lin')
    probs2 = neuromancer.Softmax(probs2, name='probs')
    t_sh = probs2.shape.copy()
    t_sh.updateshape('f', 1)
    target2 = neuromancer.Input_like(t_sh, dtype='float32', name='target')

    loss_pix2 = neuromancer.MultinoulliNLL(probs2, target2)
    loss2 = neuromancer.AggregateLoss(loss_pix2)
    errors2 = neuromancer.Errors(probs2, target2, target_is_sparse=True)
    model2.designate_nodes(input_node=inp2,
                           target_node=target2,
                           loss_node=loss2,
                           prediction_node=probs2,
                           prediction_ext=[loss2, errors2, probs2])

    model.save('/tmp/test.pkl')
    model2.save('/tmp/test2.pkl')
    model2_reloaded = modelload('/tmp/test2.pkl')
    model2_reloaded.save('/tmp/test2_reloaded.pkl')

    print(neuromancer.model_manager)
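A short follow-up sketch, running a forward pass with the reloaded second model on random input of the shape the graph was built for (mfp=False, hence (1, 1, 25, 171, 171)):

import numpy as np

x = np.random.rand(1, 1, 25, 171, 171).astype(np.float32)  # (b, f, z, x, y)
y = model2_reloaded.predict(x)
print(y.shape)  # softmax probabilities for the two classes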