Example #1
def cvpr2018_net(vol_size,
                 enc_nf,
                 dec_nf,
                 full_size=True,
                 indexing='ij',
                 use_seg=False,
                 n_seg=2):
    """
    unet architecture for voxelmorph models presented in the CVPR 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6 (like voxelmorph-1) or 1x7 (voxelmorph-2)
    :param use_seg: if True, also warp a half-resolution segmentation input with the downsampled flow
    :param n_seg: number of segmentation channels
    :return: the keras model
    """
    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get the core model
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)
    [src, tgt] = unet_model.inputs
    x = unet_model.output

    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims,
                kernel_size=3,
                padding='same',
                name='flow',
                kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)

    # warp the source with the flow
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing=indexing)([src, flow])
    # prepare model
    if not use_seg:
        model = Model(inputs=[src, tgt], outputs=[y, flow])
    else:
        downfac = 2
        # note: this segmentation branch assumes 3D volumes
        src_seg = Input(shape=(vol_size[0] // downfac, vol_size[1] // downfac,
                               vol_size[2] // downfac, n_seg))

        flow_dn = Lambda(interp_downsampling)(flow)
        flow_dn = Lambda(lambda arg: arg / 2.0)(flow_dn)
        y_seg = nrn_layers.SpatialTransformer(
            interp_method='linear', indexing=indexing)([src_seg, flow_dn])

        model = Model(inputs=[src, tgt, src_seg], outputs=[y, flow, y_seg])
    return model
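For orientation, here is a minimal usage sketch. It assumes this function lives in the voxelmorph networks module (imported below as `networks`) together with its `unet_core` dependency; the volumes are placeholders.

# hypothetical driver for cvpr2018_net; `networks` is an assumed module name
import numpy as np
import networks

vol_size = (160, 192, 224)
model = networks.cvpr2018_net(vol_size, [16, 32, 32, 32],
                              [32, 32, 32, 32, 32, 16, 16])
moving = np.zeros((1, *vol_size, 1), dtype='float32')  # placeholder source volume
fixed = np.zeros((1, *vol_size, 1), dtype='float32')   # placeholder target volume
warped, flow = model.predict([moving, fixed])
print(warped.shape, flow.shape)  # (1, 160, 192, 224, 1) and (1, 160, 192, 224, 3)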
Example #2
def cvpr2018_net(vol_size, enc_nf, dec_nf, full_size=True, indexing='ij'):
    """
    unet architecture for voxelmorph models presented in the CVPR 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param vol_size: volume size. e.g. (256, 256, 256) (here: 160x192x224)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6 (like voxelmorph-1) or 1x7 (voxelmorph-2).
           e.g. [32, 32, 32, 32, 32, 16, 16]
    :return: the keras model
    """
    ndims = len(vol_size) # -> 3
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get the core model
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)
    [src, tgt] = unet_model.inputs
    x = unet_model.output

    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims, kernel_size=3, padding='same', name='flow',
                  kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)

    # warp the source with the flow
    # indexing = 'ij'
    y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])
    # print(y.shape) (?, 160, 192, 224, 1)
    # src: ?x160x192x224x1  flow: ?x160x192x224x3
    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow])
    return model
Example #3
def cvpr2018_net(vol_size, enc_nf, dec_nf, full_size=True, indexing='ij'):

    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get the core model
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)
    [src, tgt] = unet_model.inputs
    x = unet_model.output

    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims,
                kernel_size=3,
                padding='same',
                name='flow',
                kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)

    # warp the source with the flow
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing=indexing)([src, flow])
    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow])
    return model
Example #4
def trf_core(vol_size,
             interp_method='linear',
             indexing='ij',
             nb_feats=1,
             int_steps=0):
    """
    Simple transform model 
    Note: this is essentially a wrapper for the neuron.utils.transform(..., interp_method='nearest')
    """
    ndims = len(vol_size)

    # nn warp model
    subj_input = Input((*vol_size, nb_feats), name='subj_input')
    trf_input = Input((*vol_size, ndims), name='trf_input')

    if int_steps > 0:
        trf = nrn_layers.VecInt(method='ss',
                                name='trf-int',
                                int_steps=int_steps)(trf_input)
    else:
        trf = trf_input

    # note: pass interp_method='nearest' for label maps
    # use xy indexing when the flow comes from Guha's original code, which switched the x and y dimensions
    nn_output = nrn_layers.SpatialTransformer(interp_method=interp_method,
                                              indexing=indexing)
    nn_spatial_output = nn_output([subj_input, trf])
    return keras.models.Model([subj_input, trf_input], nn_spatial_output)
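A short usage sketch, assuming trf_core and its neuron dependencies are in scope; a zero displacement field acts as the identity warp.

import numpy as np

vol_size = (160, 192, 224)
warper = trf_core(vol_size, interp_method='nearest')  # 'nearest' suits label maps
vol = np.zeros((1, *vol_size, 1), dtype='float32')    # placeholder volume
disp = np.zeros((1, *vol_size, 3), dtype='float32')   # zero flow = identity warp
warped = warper.predict([vol, disp])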
Example #5
def interp_upsampling(V):
    """
    upsample a field by a factor of 2
    TODO: should switch this to use neuron.utils.interpn()
    """
    grid = nrn_utils.volshape_to_ndgrid(
        [f * 2 for f in V.get_shape().as_list()[1:-1]])
    grid = [tf.cast(f, 'float32') for f in grid]
    # offset = f/2 - f, so each output voxel at f samples the input at f/2,
    # i.e. a factor-2 linear upsampling of the field
    grid = [tf.expand_dims(f / 2 - f, 0) for f in grid]
    offset = tf.stack(grid, len(grid) + 1)

    # V = nrn_utils.transform(V, offset)
    V = nrn_layers.SpatialTransformer(interp_method='linear')([V, offset])

    return V
Example #6
def unet(vol_size, enc_nf, dec_nf, full_size=True):
    """
    unet architecture for voxelmorph models presented in the CVPR 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6 (like voxelmorph-1) or 1x7 (voxelmorph-2)
    :return: the keras model
    """

    # get the core model
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)
    [src, tgt] = unet_model.inputs
    x = unet_model.output

    # transform the results into a flow field.
    flow = Conv3D(3, kernel_size=3, padding='same',
                  kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x)

    # warp the source with the flow
    y = nrn_layers.SpatialTransformer(interp_method='linear', indexing='xy')([src, flow])
    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow])
    return model
Example #7
def nn_trf(vol_size, indexing='xy'):
    """
    Simple transform model for nearest-neighbor based transformation
    Note: this is essentially a wrapper for neuron.utils.transform(..., interp_method='nearest')
    """
    ndims = len(vol_size)

    # nn warp model
    subj_input = Input((*vol_size, 1), name='subj_input')
    trf_input = Input((*vol_size, ndims), name='trf_input')

    # note the nearest neighbour interpolation method
    # note xy indexing because Guha's original code switched x and y dimensions
    nn_output = nrn_layers.SpatialTransformer(interp_method='nearest',
                                              indexing=indexing)
    nn_spatial_output = nn_output([subj_input, trf_input])
    return keras.models.Model([subj_input, trf_input], nn_spatial_output)
Example #8
def interp_upsampling(V):
    """ 
    upsample a field by a factor of 2
    TODO: should switch this to use neuron.utils.interpn()
    """

    grid = nrn_utils.volshape_to_ndgrid([f*2 for f in V.get_shape().as_list()[1:-1]])
    grid = [tf.cast(f, 'float32') for f in grid]
    grid = [tf.expand_dims(f/2 - f, 0) for f in grid]
    offset = tf.stack(grid, len(grid) + 1)

    # V = nrn_utils.transform(V, offset)
    V = nrn_layers.SpatialTransformer(interp_method='linear')([V, offset])
    return V
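In the miccai2018_net example further down, this function is wrapped in a Keras Lambda to bring a half-resolution flow up to full resolution. A self-contained sketch of that pattern (shapes are illustrative):

import numpy as np
from keras.layers import Input, Lambda
from keras.models import Model

half_size = (80, 96, 112)
flow_in = Input(shape=(*half_size, 3))
flow_up = Lambda(interp_upsampling, output_shape=(160, 192, 224, 3))(flow_in)
flow_up = Lambda(lambda x: x * 2)(flow_up)  # displacement magnitudes scale with resolution
up_model = Model(flow_in, flow_up)
print(up_model.predict(np.zeros((1, *half_size, 3), 'float32')).shape)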
Example #9
    def _lseg(self, warp):
        print('warp is:', warp)
        Sm_ten = tf.convert_to_tensor(self.Sm_)
        Sm_tensor = tf.to_float(Sm_ten, name='ToFloat')
        # print(Sm_tensor)
        y = nrn_layers.SpatialTransformer(interp_method='nearest',
                                          indexing='ij')([Sm_tensor,
                                                          warp])  # nearest
        # print(y)

        Sf_ten = tf.convert_to_tensor(self.Sf_)
        Sf_tensor = tf.to_float(Sf_ten, name='ToFloat')
        # print(Sf_tensor)

        dicem = binary_dice(Sf_tensor, y)
        return dicem
Example #10
def nn_trf(vol_size):
    """
    Simple transform model for nearest-neighbor based transformation
    Note: this is essentially a wrapper for the neuron.utils.transform(..., interp_method='nearest')
    """
    ndims = len(vol_size)

    # nn warp model
    subj_input = Input((*vol_size, 1), name='subj_input')
    trf_input = Input((*vol_size, ndims), name='trf_input')

    # note the nearest neighbour interpolation method
    # note xy indexing because Guha's original code switched x and y dimensions
    nn_output = nrn_layers.SpatialTransformer(interp_method='nearest', indexing='xy')
    nn_spatial_output = nn_output([subj_input, trf_input])
    return keras.models.Model([subj_input, trf_input], nn_spatial_output)
Example #11
    def make_flow(self):
        [Atlas, Moving] = self.unet_model.inputs
        x = self.unet_model.output

        flow = Conv3D(3,
                      kernel_size=3,
                      padding='same',
                      name='flow',
                      kernel_initializer=RandomNormal(mean=0.0,
                                                      stddev=1e-5))(x)

        # warp the source with the flow
        y = nrn_layers.SpatialTransformer(
            interp_method='linear', indexing=self.indexing)([Moving, flow])
        # prepare model
        self.model = Model(inputs=[Atlas, Moving], outputs=[y, flow])
Example #12
def ncc_l(I, J, flow, l):
    """
       local (over window) normalized cross correlation
    """
    eps = 1e-5
    J = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([J, flow])
    ndims = len(I.get_shape().as_list()) - 2
    assert ndims in [1, 2, 3], "volumes should be 1 to 3 dimensions. found: %d" % ndims

    # set window size
    win = [l] * ndims

    # get convolution function
    conv_fn = getattr(tf.nn, 'conv%dd' % ndims)

    # compute CC squares
    I2 = I * I
    J2 = J * J
    IJ = I * J

    # compute filters
    sum_filt = tf.ones([*win, 1, 1])
    strides = [1] * (ndims + 2)
    padding = 'SAME'

    # compute local sums via convolution
    I_sum = conv_fn(I, sum_filt, strides, padding)
    J_sum = conv_fn(J, sum_filt, strides, padding)
    I2_sum = conv_fn(I2, sum_filt, strides, padding)
    J2_sum = conv_fn(J2, sum_filt, strides, padding)
    IJ_sum = conv_fn(IJ, sum_filt, strides, padding)

    # compute cross correlation
    win_size = np.prod(win)
    u_I = I_sum / win_size
    u_J = J_sum / win_size

    cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
    I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
    J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size

    cc = cross * cross / (I_var * J_var + eps)

    # return negative cc.
    return -tf.reduce_mean(cc)
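Note that ncc_l takes four arguments rather than the usual (y_true, y_pred) pair, since it warps J with the flow before comparing. One way to adapt it as a Keras loss (an assumption, not the original training setup) is to close over the flow tensor and window size:

def make_ncc_loss(flow, win=9):
    """return a (y_true, y_pred)-style wrapper around ncc_l"""
    def loss(y_true, y_pred):
        return ncc_l(y_true, y_pred, flow, win)
    return loss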
Example #13
    def __call__(self, images_0, images_1, reuse=False):# reuse=False
        with tf.variable_scope(self.name, reuse=reuse) as vs:
            x_in = tf.concat([images_1, images_0], axis=-1)

            # down-sample path (encoder)
            x_enc = [x_in]
            for i in range(len(self.enc_nf)):
                x_enc.append(self.conv_block(x_enc[-1], self.enc_nf[i], 2))

            # up-sample path (decoder)
            x = self.conv_block(x_enc[-1], self.dec_nf[0])
            x = UpSampling3D()(x)
            x = tf.concat([x, x_enc[-2]], axis=-1)
            x = self.conv_block(x, self.dec_nf[1])
            x = UpSampling3D()(x)
            x = tf.concat([x, x_enc[-3]], axis=-1)
            x = self.conv_block(x, self.dec_nf[2])
            x = UpSampling3D()(x)
            x = tf.concat([x, x_enc[-4]], axis=-1)
            x = self.conv_block(x, self.dec_nf[3])
            x = self.conv_block(x, self.dec_nf[4])

            # only upsample to full dim if full_size
            # here we explore architectures where we essentially work with flow fields
            # that are 1/2 size

            x = UpSampling3D()(x)
            x = tf.concat([x, x_enc[0]], axis=-1)
            x = self.conv_block(x, self.dec_nf[5])

            # optional convolution at output resolution (used in voxelmorph-2)
            if len(self.dec_nf) == 7:
                x = self.conv_block(x, self.dec_nf[6])

            # transform the results into a flow field.
            # flow = tf.layers.Conv3D(3, kernel_size=3, padding='same', name='flow',
                                    # kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)
            flow = tf.layers.Conv3D(3, (3, 3, 3), (1, 1, 1), 'same', kernel_initializer='he_normal')(x)

            # warp the source with the flow
            y = nrn_layers.SpatialTransformer(interp_method='linear', indexing='ij')([images_1, flow])

            return flow, y
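A sketch of driving this TF1-style __call__, assuming it belongs to a network class (called Net here purely for illustration) whose constructor sets name, enc_nf and dec_nf:

import tensorflow as tf

net = Net()  # hypothetical: the class owning the __call__ above
images_0 = tf.placeholder(tf.float32, [1, 160, 192, 224, 1])
images_1 = tf.placeholder(tf.float32, [1, 160, 192, 224, 1])
flow, warped = net(images_0, images_1)  # builds the graph under the class's variable scope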
Example #14
def interp_upsampling(V):
    """ 
    upsample a field by a factor of 2
    TODO: should switch this to use neuron.utils.interpn()
    """

    [xx, yy, zz] = nrn_utils.volshape_to_ndgrid([f*2 for f in V.get_shape().as_list()[1:4]])
    xx = tf.cast(xx, 'float32')
    yy = tf.cast(yy, 'float32')
    zz = tf.cast(zz, 'float32')
    xx = tf.expand_dims(xx/2-xx, 0)
    yy = tf.expand_dims(yy/2-yy, 0)
    zz = tf.expand_dims(zz/2-zz, 0)
    offset = tf.stack([xx, yy, zz], 4)

    # V = nrn_utils.transform(V, offset)
    V = nrn_layers.SpatialTransformer(interp_method='linear')([V, offset])

    return V
Example #15
    def __call__(self, images_0, images_1, reuse=False):  # reuse=False
        with tf.variable_scope(self.name, reuse=reuse) as vs:
            pyramid_0, pyramid_params_0 = self.fp_extractor(images_0,
                                                            reuse=reuse)
            pyramid_1, pyramid_params_1 = self.fp_extractor(images_1)
            flows_pyramid = []
            flows_up, features_up = None, None

            for l, (features_0,
                    features_1) in enumerate(zip(pyramid_0, pyramid_1)):

                # Flow estimation
                flows = self.of_estimator[l](features_0, features_1, flows_up)

                # Integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)
                z_sample = flows
                flows = nrn_layers.VecInt(method='ss',
                                          name='flow-int',
                                          int_steps=self.int_steps)(z_sample)

                if l < self.output_level:
                    # up-sample
                    flows_up = nrn_layers.Resize(zoom_factor=2,
                                                 interp_method='linear')(
                                                     flows * 2)
                else:
                    # At output level
                    flows_pyramid.append(flows)
                    # Obtain finally scale-adjusted flow
                    upscale = 2**(self.num_levels - self.output_level)
                    flows_final = nrn_layers.Resize(zoom_factor=upscale,
                                                    interp_method='linear')(
                                                        flows * upscale)
                    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                                      indexing='ij')([
                                                          images_1, flows_final
                                                      ])
                    return flows_final, y, flows_pyramid, pyramid_params_0, pyramid_params_1

                flows_pyramid.append(flows)
Example #16
def miccai2018_net(vol_size, enc_nf, dec_nf, use_miccai_int=True, int_steps=7, indexing='xy'):
    """
    architecture for probabilistic diffeomorphic VoxelMorph presented in the MICCAI 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    The stationary velocity field operates in a space (0.5)^3 of vol_size for computational reasons.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6, see unet function.
    :param use_miccai_int: whether to use the manual miccai implementation of scaling and squaring integration;
            since then we've updated the code to be part of a flexible layer, see neuron.layers.VecInt
    :param int_steps: the number of integration steps
    :param indexing: xy or ij indexing. we recommend ij indexing if training from scratch. 
            miccai 2018 runs were done with xy indexing.
    :return: the keras model
    """    
    
    # get unet
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=False)
    [src,tgt] = unet_model.inputs
    x_out = unet_model.outputs[-1]

    # velocity mean and logsigma layers
    flow_mean = Conv3D(3, kernel_size=3, padding='same',
                       kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x_out)

    flow_log_sigma = Conv3D(3, kernel_size=3, padding='same',
                            kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),
                            bias_initializer=keras.initializers.Constant(value=-10), name='log_sigma')(x_out)
    flow_params = concatenate([flow_mean, flow_log_sigma])

    # velocity sample
    flow = Lambda(sample, name="z_sample")([flow_mean, flow_log_sigma])

    # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)
    if use_miccai_int:
        # for the miccai2018 submission, the scaling and squaring layer
        # was manually composed of a Transform and an Add layer.
        flow = Lambda(lambda x: x, name='flow-fix')(flow)  # remnant of old code
        v = flow
        for _ in range(int_steps):
            v1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([v, v])
            v = keras.layers.add([v, v1])
        flow = v

    else:
        # new implementation in neuron is cleaner.
        # the 2**int_steps is a correcting factor left over from the miccai implementation.
        # * (2**int_steps)
        flow = Lambda(lambda x: x, name='flow-fix')(flow)
        flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)(flow)

    # get up to final resolution
    flow = Lambda(interp_upsampling, output_shape=vol_size+(3,), name='pre_diffflow')(flow)
    flow = Lambda(lambda arg: arg*2, name='diffflow')(flow)

    # transform
    y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])

    # prepare outputs and losses
    outputs = [y, flow_params]

    # build the model
    return Model(inputs=[src, tgt], outputs=outputs)
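A quick shape-check sketch for this variant, assuming the function and its dependencies are in scope:

import numpy as np

vol_size = (160, 192, 224)
model = miccai2018_net(vol_size, [16, 32, 32, 32], [32, 32, 32, 32, 32, 16])
src = np.zeros((1, *vol_size, 1), dtype='float32')
tgt = np.zeros((1, *vol_size, 1), dtype='float32')
warped, flow_params = model.predict([src, tgt])
# flow_params carries 6 channels: 3 velocity means and 3 log-sigmas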
Example #17
    def __call__(self, images_1_seg, flow, indexing='ij'):
        out = nrn_layers.SpatialTransformer(
            interp_method='nearest', indexing=indexing)([images_1_seg, flow])
        return out
Example #18
def miccai2018_net(vol_size,
                   enc_nf,
                   dec_nf,
                   int_steps=7,
                   use_miccai_int=False,
                   indexing='ij',
                   bidir=False,
                   vel_resize=1 / 2):
    """
    architecture for probabilistic diffeomorphic VoxelMorph presented in the MICCAI 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    The stationary velocity field operates in a space (0.5)^3 of vol_size for computational reasons.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6, see unet function.
    :param use_miccai_int: whether to use the manual miccai implementation of scaling and squaring integration;
            since then we've updated the code to be part of a flexible layer, see neuron.layers.VecInt.
            **This param will be phased out (set to False behavior)**
    :param int_steps: the number of integration steps
    :param indexing: xy or ij indexing. we recommend ij indexing if training from scratch. 
            miccai 2018 runs were done with xy indexing.
            **This param will be phased out (set to 'ij' behavior)**
    :return: the keras model
    """
    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get unet
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=False)
    [src, tgt] = unet_model.inputs
    x_out = unet_model.outputs[-1]

    # velocity mean and logsigma layers
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow_mean = Conv(ndims,
                     kernel_size=3,
                     padding='same',
                     kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5),
                     name='flow')(x_out)
    # we're going to initialize the velocity variance very low, to start stable.
    flow_log_sigma = Conv(
        ndims,
        kernel_size=3,
        padding='same',
        kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),
        bias_initializer=keras.initializers.Constant(value=-10),
        name='log_sigma')(x_out)
    flow_params = concatenate([flow_mean, flow_log_sigma])

    # velocity sample
    flow = Sample(name="z_sample")([flow_mean, flow_log_sigma])

    # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)
    if use_miccai_int:
        # for the miccai2018 submission, the scaling and squaring layer
        # was manually composed of a Transform and an Add layer;
        # the scaling was essentially built into the network
        v = flow
        for _ in range(int_steps):
            v1 = nrn_layers.SpatialTransformer(interp_method='linear',
                                               indexing=indexing)([v, v])
            v = keras.layers.add([v, v1])
        flow = v

    else:
        # new implementation in neuron is cleaner.
        z_sample = flow
        flow = nrn_layers.VecInt(method='ss',
                                 name='flow-int',
                                 int_steps=int_steps)(z_sample)
        if bidir:
            rev_z_sample = Negate()(z_sample)
            neg_flow = nrn_layers.VecInt(method='ss',
                                         name='neg_flow-int',
                                         int_steps=int_steps)(rev_z_sample)

    # get up to final resolution
    flow = trf_resize(flow, vel_resize, name='diffflow')

    if bidir:
        neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')

    # transform
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing=indexing)([src, flow])
    if bidir:
        y_tgt = nrn_layers.SpatialTransformer(
            interp_method='linear', indexing=indexing)([tgt, neg_flow])

    # prepare outputs and losses
    outputs = [y, flow_params]
    if bidir:
        outputs = [y, y_tgt, flow_params]

    # build the model
    return Model(inputs=[src, tgt], outputs=outputs)
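With bidir=True this variant also returns the inverse warp; a construction sketch:

model = miccai2018_net((160, 192, 224), [16, 32, 32, 32],
                       [32, 32, 32, 32, 32, 16], bidir=True)
# outputs: [src warped toward tgt, tgt warped toward src, flow_params]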
Example #19
def rigid_net(vol_size, enc_nf, dec_nf):
    """
    architecture for rigid registration.
    the CNN outputs an ND x (ND+1) affine matrix, which is passed to the affine_to_shift function
    :param vol_size: volume size
    :param enc_nf: list of encoder filters
    :param dec_nf: list of decoder filters
    :return: model
    """
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=False)
    [src, tgt] = unet_model.inputs
    x_out = unet_model.outputs[-1]
    # affine transform matrix
    # build a fully connected layer into the model, outputting the ND x (ND+1) affine matrix
    flow = Conv3D(3,
                  kernel_size=3,
                  padding='same',
                  kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5),
                  name='flow')(x_out)
    #flow = LeakyReLU(0.2)(flow)

    flow1 = Lambda(reduce_dim1)(flow)
    flow1 = Lambda(my_reshape)(flow1)
    print("flow1's shape is: " + str(flow1.shape))

    flow2 = Lambda(reduce_dim2)(flow)
    flow2 = Lambda(my_reshape)(flow2)

    flow3 = Lambda(reduce_dim3)(flow)
    flow3 = Lambda(my_reshape)(flow3)

    # add convolutional layers into the model, which output the affine matrix.
    affine_matrix1 = Conv3D(filters=4,
                            kernel_size=(80, 96, 112),
                            padding='valid',
                            kernel_initializer=RandomNormal(mean=0.0,
                                                            stddev=1e-5),
                            name='flow1')(flow1)
    affine_matrix1 = Reshape((1, 4))(affine_matrix1)
    #affine_matrix1 = LeakyReLU(0.2)(affine_matrix1)

    print("affine_matrix1's shape is: " + str(affine_matrix1.shape))
    affine_matrix2 = Conv3D(filters=4,
                            kernel_size=(80, 96, 112),
                            padding='valid',
                            kernel_initializer=RandomNormal(mean=0.0,
                                                            stddev=1e-5),
                            name='flow2')(flow2)
    affine_matrix2 = Reshape((1, 4))(affine_matrix2)
    #affine_matrix2 = LeakyReLU(0.2)(affine_matrix2)

    affine_matrix3 = Conv3D(filters=4,
                            kernel_size=(80, 96, 112),
                            padding='valid',
                            kernel_initializer=RandomNormal(mean=0.0,
                                                            stddev=1e-5),
                            name='flow3')(flow3)
    affine_matrix3 = Reshape((1, 4))(affine_matrix3)
    #affine_matrix3 = LeakyReLU(0.2)(affine_matrix3)

    affine_matrix = concatenate([affine_matrix1, affine_matrix2])
    affine_matrix = concatenate([affine_matrix, affine_matrix3])

    affine_matrix = Lambda(reduce_dim4)(affine_matrix)
    print("affine_matrix's shape is :" + str(affine_matrix.shape))
    # spatial transform
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing='xy')([src, affine_matrix])
    model = Model(inputs=[src, tgt], outputs=[y, flow])
    print("the output's shape is:" + str(y.shape))
    return model
Example #20
    def keras_block(flow, src, interp_method='linear'):
        # note: `indexing` is taken from the enclosing scope
        y = nrn_layers.SpatialTransformer(interp_method=interp_method,
                                          indexing=indexing)([src, flow])
        return y
Example #21
def miccai2018_net(vol_size, enc_nf, dec_nf, int_steps=6, indexing='ij', vel_resize=0.5):
    """
    architecture for probabilistic diffeomorphic VoxelMorph presented in the MICCAI 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    The stationary velocity field operates in a space (0.5)^3 of vol_size for computational reasons.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6, see unet function.
    :param int_steps: the number of integration steps
    :param indexing: xy or ij indexing. we recommend ij indexing if training from scratch. 
            miccai 2018 runs were done with xy indexing.
            **This param will be phased out (set to 'ij' behavior)**
    :return: the keras model
    """

    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get unet
    unet_model = unet_core(vol_size, enc_nf, dec_nf, vel_resize)
    
    # target delta in binary representation
    b_in = Input(shape=(16,)) 

    x_in = unet_model.inputs[0]
    x_out = unet_model.outputs[-1]

    # velocity mean and logsigma layers
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow_mean = Conv(ndims, kernel_size=3, padding='same',
                    kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5), name='flow')(x_out)
    
    # we're going to initialize the velocity variance very low, to start stable.
    flow_log_sigma = Conv(ndims, kernel_size=3, padding='same',
                            kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),
                            bias_initializer=keras.initializers.Constant(value=-10),
                            name='log_sigma')(x_out)
    
    flow_params = concatenate([flow_mean, flow_log_sigma])

    # velocity sample
    flow = Sample(name="z_sample")([flow_mean, flow_log_sigma])

    # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)
    z_sample = flow
    flow = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=int_steps)([z_sample, b_in])

    # get up to final resolution
    if vel_resize != 1.0:
        flow = trf_resize(flow, vel_resize, name='diff-flow')

    # transform
    y = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([x_in, flow])

    # prepare outputs
    outputs = [y, flow_params]

    # build the model
    return Model(inputs=[x_in, b_in], outputs=outputs)
Example #22
def cvpr2018_net_probatlas(vol_size, enc_nf, dec_nf, nb_labels,
                           diffeomorphic=True,
                           full_size=True,
                           indexing='ij',
                           init_mu=None,
                           init_sigma=None,
                           stat_post_warp=False,  # compute statistics post warp?
                           network_stat_weight=0.001,
                           warp_method='WARP',
                           stat_nb_feats=16):
    """
    Network to do unsupervised segmentation with probabilistic atlas
    (Dalca et al., submitted to MICCAI 2019)
    """
    # print(warp_method)
    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims
    weaknorm = RandomNormal(mean=0.0, stddev=1e-5)

    # get the core model
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size, tgt_feats=nb_labels)
    [src_img, src_atl] = unet_model.inputs
    x = unet_model.output

    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow1 = Conv(ndims, kernel_size=3, padding='same', name='flow', kernel_initializer=weaknorm)(x)
    if diffeomorphic:
        flow2 = nrn_layers.VecInt(method='ss', name='flow-int', int_steps=8)(flow1)
    else:
        flow2 = flow1
    if full_size:
        flow = flow2
    else:
        flow = trf_resize(flow2, 1/2, name='diffflow')

    # warp atlas
    if warp_method == 'WARP':
        warped_atlas = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing, name='warped_atlas')([src_atl, flow])
    else:
        warped_atlas = src_atl

    if stat_post_warp:
        assert warp_method == 'WARP', "if computing stat post warp, must do warp... :) set warp_method to 'WARP' or stat_post_warp to False?"

        # combine warped atlas and warped image and output mu and log_sigma_squared
        combined = concatenate([warped_atlas, src_img])
    else:
        combined = unet_model.layers[-2].output

    conv1 = conv_block(combined, stat_nb_feats)
    conv2 = conv_block(conv1, nb_labels)
    stat_mu_vol = Conv(nb_labels, kernel_size=3, name='mu_vol',
                    kernel_initializer=weaknorm, bias_initializer=weaknorm)(conv2)
    stat_mu = keras.layers.GlobalMaxPooling3D()(stat_mu_vol)
    stat_logssq_vol = Conv(nb_labels, kernel_size=3, name='logsigmasq_vol',
                        kernel_initializer=weaknorm, bias_initializer=weaknorm)(conv2)
    stat_logssq = keras.layers.GlobalMaxPooling3D()(stat_logssq_vol)

    # combine mu with initialization
    if init_mu is not None: 
        init_mu = np.array(init_mu)
        stat_mu = Lambda(lambda x: network_stat_weight * x + init_mu, name='comb_mu')(stat_mu)
    
    # combine sigma with initialization
    if init_sigma is not None: 
        init_logsigmasq = np.array([2*np.log(f) for f in init_sigma])
        stat_logssq = Lambda(lambda x: network_stat_weight * x + init_logsigmasq, name='comb_sigma')(stat_logssq)

    # unnorm log-lik
    def unnorm_loglike(I, mu, logsigmasq, uselog=True):
        P = tf.distributions.Normal(mu, K.exp(logsigmasq/2))
        if uselog:
            return P.log_prob(I)
        else:
            return P.prob(I)

    uloglhood = KL.Lambda(lambda x: unnorm_loglike(*x), name='unsup_likelihood')([src_img, stat_mu, stat_logssq])

    # compute data loss as a layer, because it's a bit easier than outputting a ton of things, etc.
    # def logsum(ll, atl):
    #     pdf = ll * atl
    #     return tf.log(tf.reduce_sum(pdf, -1, keepdims=True) + K.epsilon())

    def logsum_safe(prob_ll, atl):
        """
        safe computation using the log sum exp trick
        e.g. https://www.xarg.org/2016/06/the-log-sum-exp-trick-in-machine-learning/
        where x = logpdf

        note does not normalize p 
        """
        logpdf = prob_ll + K.log(atl + K.epsilon())
        alpha = tf.reduce_max(logpdf, -1, keepdims=True)
        return alpha + tf.log(tf.reduce_sum(K.exp(logpdf-alpha), -1, keepdims=True) + K.epsilon())

    loss_vol = Lambda(lambda x: logsum_safe(*x))([uloglhood, warped_atlas])

    return Model(inputs=[src_img, src_atl], outputs=[loss_vol, flow])
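A construction sketch with illustrative values; nb_labels must match the number of channels in the probabilistic atlas:

model = cvpr2018_net_probatlas((160, 192, 224), [16, 32, 32, 32],
                               [32, 32, 32, 32, 32, 16], nb_labels=30)
# outputs: [loss_vol (per-voxel data-term volume), flow]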
Example #23
def miccai2018_net(vol_size,
                   enc_nf,
                   dec_nf,
                   int_steps=7,
                   use_miccai_int=False,
                   indexing='ij',
                   bidir=False,
                   vel_resize=1 / 2):

    ndims = len(vol_size)
    assert ndims in [1, 2,
                     3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get unet
    unet_model = unet_core(vol_size, enc_nf, dec_nf, full_size=False)
    [src, tgt] = unet_model.inputs
    x_out = unet_model.outputs[-1]

    # velocity mean and logsigma layers
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow_mean = Conv(ndims,
                     kernel_size=3,
                     padding='same',
                     kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5),
                     name='flow')(x_out)
    # we're going to initialize the velocity variance very low, to start stable.
    flow_log_sigma = Conv(
        ndims,
        kernel_size=3,
        padding='same',
        kernel_initializer=RandomNormal(mean=0.0, stddev=1e-10),
        bias_initializer=keras.initializers.Constant(value=-10),
        name='log_sigma')(x_out)
    flow_params = concatenate([flow_mean, flow_log_sigma])

    # velocity sample
    flow = Sample(name="z_sample")([flow_mean, flow_log_sigma])

    # integrate if diffeomorphic (i.e. treating 'flow' above as stationary velocity field)
    if use_miccai_int:
        # for the miccai2018 submission, the scaling and squaring layer
        # was manually composed of a Transform and an Add layer;
        # the scaling was essentially built into the network
        v = flow
        for _ in range(int_steps):
            v1 = nrn_layers.SpatialTransformer(interp_method='linear',
                                               indexing=indexing)([v, v])
            v = keras.layers.add([v, v1])
        flow = v

    else:
        # new implementation in neuron is cleaner.
        z_sample = flow
        flow = nrn_layers.VecInt(method='ss',
                                 name='flow-int',
                                 int_steps=int_steps)(z_sample)
        if bidir:
            rev_z_sample = Negate()(z_sample)
            neg_flow = nrn_layers.VecInt(method='ss',
                                         name='neg_flow-int',
                                         int_steps=int_steps)(rev_z_sample)

    # get up to final resolution
    flow = trf_resize(flow, vel_resize, name='diffflow')

    if bidir:
        neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')

    # transform
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing=indexing)([src, flow])
    if bidir:
        y_tgt = nrn_layers.SpatialTransformer(
            interp_method='linear', indexing=indexing)([tgt, neg_flow])

    # prepare outputs and losses
    outputs = [y, flow_params]
    if bidir:
        outputs = [y, y_tgt, flow_params]

    # build the model
    return Model(inputs=[src, tgt], outputs=outputs)
Example #24
def labels_to_image_model(labels_shape,
                          crop_shape,
                          generation_label_list,
                          segmentation_label_list,
                          n_channels=1,
                          labels_res=(1,1,1),
                          target_res=None,
                          padding_margin=None,
                          apply_affine_trans=False,
                          apply_nonlin_trans=True,
                          nonlin_shape_factor=0.0625,
                          apply_bias_field=True,
                          bias_shape_factor=0.025,
                          blur_background=True,
                          normalise=True,
                          out_div_32=False,
                          convert_back=False,
                          id=0, # For different layer names if several models.
                          rand_blur=True):
    """
        This function builds a keras/tensorflow model to generate brain images from supplied labels.
        It returns the model as well as the shape of the output images without batch and channel dimensions
        (height*width*depth).
        The model takes as inputs:
            -a label image
            -a vector containing the means of the Gaussian distributions to sample for each label,
            -a similar vector for the associated standard deviations.
            -if apply_affine_trans=True: a (n_dims+1)x(n_dims+1) affine matrix
            -if apply_nonlin_trans=True: a small non-linear field of size batch*x*y*z*n_dims that will be
             resampled to labels size
            -if apply_bias_field=True: a small bias field of size batch*x*y*z*1 that will be resampled to labels size
        The model returns:
            -the generated image
            -the corresponding label map
    :param labels_shape: should be a list or tensor with image dimension plus channel size at the end
    :param n_channels: number of channels to be synthesised
    :param labels_res: list of dimension resolutions of model's inputs
    :param target_res: list of dimension resolutions of model's outputs
    :param crop_shape: list, shape of model's outputs
    :param generation_label_list: list of all the labels in the dataset (internally converted to [0...N-1] and converted
    back to original values at the end of model)
    :param segmentation_label_list: list of all the labels in the output labels (internally converted to [0...N-1] and
    converted back to original values at the end of model)
    :param padding_margin: margin by which the input labels will be 0-padded. This step happens
    before an eventual cropping. Default is None, no padding.
    :param apply_affine_trans: whether to apply affine deformation during generation
    :param apply_nonlin_trans: whether to apply non linear deformation during generation
    :param nonlin_shape_factor: if apply_nonlin_trans=True, factor between the shapes of the labels and of
    the non-linear field that will be sampled
    :param apply_bias_field: whether to apply a bias field to the created image during generation
    :param bias_shape_factor: if apply_bias_field=True, factor between the shapes of the labels and of the bias field
    that will be sampled
    :param blur_background: Whether background is a regular label, thus blurred with the others.
    :param normalise: whether to normalise data. Default is True.
    :param out_div_32: whether model's outputs must be of shape divisible by 32
    """

    # get shapes
    n_dims = len(labels_shape)
    target_res = format_target_res(target_res, n_dims)
    crop_shape, resample_shape, output_shape, padding_margin = get_shapes(labels_shape,
                                                                          crop_shape,
                                                                          labels_res,
                                                                          target_res,
                                                                          padding_margin,
                                                                          out_div_32)
    # create new_label_list and corresponding LUT to make sure that labels go from 0 to N-1
    n_generation_labels = generation_label_list.shape[0]
    new_generation_label_list = np.arange(n_generation_labels)
    lut = np.zeros(np.max(generation_label_list).astype('int') + 1)
    for n in range(n_generation_labels):
        lut[generation_label_list[n].astype('int')] = n

    # define mandatory inputs
    labels_input = KL.Input(shape=(*labels_shape, 1), name=f'labels_input_{id}')
    means_input = KL.Input(shape=(*new_generation_label_list.shape, n_channels), name=f'means_input_{id}')
    std_devs_input = KL.Input(shape=(*new_generation_label_list.shape, n_channels), name=f'std_devs_input_{id}')
    list_inputs = [labels_input, means_input, std_devs_input]

    # convert labels to new_label_list
    labels = KL.Lambda(lambda x: tf.gather(tf.convert_to_tensor(lut, dtype='int32'),
                                           tf.cast(x, dtype='int32')), name=f'convert_labels_{id}')(labels_input)

    # pad labels
    if padding_margin is not None:
        pad = np.transpose(np.array([[0] + padding_margin + [0]] * 2))
        labels = KL.Lambda(lambda x: tf.pad(x, tf.cast(tf.convert_to_tensor(pad), dtype='int32')), name=f'pad_{id}')(labels)
        labels_shape = labels.get_shape().as_list()[1:-1]

    # cropping
    if crop_shape is not None:
        # get maximum cropping indices in each dimension
        cropping_max_val = [labels_shape[i] - crop_shape[i] for i in range(n_dims)]
        # prepare cropping indices and tensor's new shape
        idx = KL.Lambda(lambda x: tf.zeros([1], dtype='int32'), name=f'no_cropping_batch_{id}')(means_input)  # no cropping
        for val_idx, val in enumerate(cropping_max_val):  # draw cropping indices for image dimensions
            if val > 0:
                idx = KL.Lambda(lambda x: tf.concat(
                    [tf.cast(x, dtype='int32'), K.random_uniform([1], minval=0, maxval=val, dtype='int32')], axis=0),
                                name=f'pick_cropping_idx_{val_idx}_{id}')(idx)
            else:
                idx = KL.Lambda(lambda x: tf.concat([tf.cast(x, dtype='int32'), tf.zeros([1], dtype='int32')], axis=0),
                                name=f'pick_cropping_idx_{val_idx}_{id}')(idx)
        idx = KL.Lambda(lambda x: tf.concat([tf.cast(x, dtype='int32'), tf.zeros([1], dtype='int32')], axis=0),
                        name=f'no_cropping_channel_{id}')(idx)  # no cropping for channel dimension
        patch_shape_tens = KL.Lambda(lambda x: tf.convert_to_tensor([-1] + crop_shape + [-1], dtype='int32'),
                                     name=f'tensor_cropping_idx_{id}')(means_input)
        # perform cropping
        labels = KL.Lambda(
            lambda x: tf.slice(x[0], begin=tf.cast(x[1], dtype='int32'), size=tf.cast(x[2], dtype='int32')),
            name=f'cropping_{id}')([labels, idx, patch_shape_tens])
    else:
        crop_shape = labels_shape

    labels = KL.Lambda(lambda x: tf.cast(x, dtype='float'))(labels)

    # if necessary, resample image and labels at target resolution
    if resample_shape is not None:
        labels = KL.Lambda(lambda x: tf.cast(x, dtype='float32'))(labels)
        zoom_fact = [r / l for r, l in zip(resample_shape, labels_shape)] 
        labels = nrn_layers.Resize(zoom_fact, interp_method='nearest', name=f'resample_labels_{id}')(labels)

    # deform labels
    if apply_affine_trans | apply_nonlin_trans:
        labels._keras_shape = tuple(labels.get_shape().as_list())
        trans_inputs = [labels]
        # add affine deformation to inputs list
        if apply_affine_trans:
            aff_in = KL.Input(shape=(n_dims + 1, n_dims + 1), name=f'aff_input_{id}')
            list_inputs.append(aff_in)
            trans_inputs.append(aff_in)
        # prepare non-linear deformation field and add it to inputs list
        if apply_nonlin_trans:
            def_field_size = get_nonlin_field_shape(crop_shape, nonlin_shape_factor)
            nonlin_field_in = KL.Input(shape=def_field_size, name=f'nonlin_input_{id}')
            list_inputs.append(nonlin_field_in)
            int_at = 2.0
            zoom = [o / d / int_at for o, d in zip(output_shape, def_field_size)] 
            vel_field = nonlin_field_in
            vel_field = nrn_layers.Resize(zoom, interp_method='linear', name=f'resize_vel_{id}')(vel_field)
            def_field = nrn_layers.VecInt(int_steps=5)(vel_field)
            #def_field = nrn_layers.RescaleValues(int_at)(def_field)
            def_field = nrn_layers.Resize(int_at, interp_method='linear', name=f'resize_def_{id}')(def_field)
            trans_inputs.append(def_field)

        # apply deformations
        labels = nrn_layers.SpatialTransformer(interp_method='nearest', name=f'trans_{id}')(trans_inputs)
    labels = KL.Lambda(lambda x: tf.cast(x, dtype='int32'))(labels)

    # sample from normal distribution
    image = KL.Lambda(lambda x: tf.random.normal(tf.shape(x)),  name=f'sample_normal_{id}')(labels)

    # build synthetic image
    f_cat = lambda x: tf.concat([x+n_generation_labels*i for i in range(n_channels)], -1)
    cat_labels = KL.Lambda(f_cat, name=f'cat_labels_{id}')(labels)
    f_gather = lambda x: tf.gather(tf.reshape(x[0], [-1]), tf.cast(x[1], dtype='int32'))
    f_map = lambda x: tf.map_fn(f_gather, x, dtype='float32')
    means = KL.Lambda(f_map)([means_input, cat_labels])
    std_devs = KL.Lambda(f_map)([std_devs_input, cat_labels])
    image = KL.Multiply(name=f'mul_std_dev_{id}')([image, std_devs])
    image = KL.Add(name=f'add_means_{id}')([image, means])

    if rand_blur:
        shape = [5] * n_dims 
        lim = [(s - 1) / 2 for s in shape]
        lim = [np.arange(-l, l+1) for l in lim]
        grid = np.meshgrid(*lim, indexing='ij')
        grid = [g ** 2 for g in grid]
        c_grid = KL.Lambda(lambda x: tf.constant(np.stack(grid), dtype='float32'))([])
        sigma = KL.Lambda(lambda x: tf.random.uniform((n_dims,), minval=1e-6, maxval=1))([])
        f = lambda x: x[0] / x[1]**2
        kernel = KL.Lambda(lambda x: tf.map_fn(f, x, dtype='float32'))([c_grid, sigma])
        kernel = KL.Lambda(lambda x: tf.exp( -tf.reduce_sum(x, axis=0) ))(kernel)
        kernel = KL.Lambda(lambda x: x[..., None, None] / tf.reduce_sum(x))(kernel)
    else:
        if (target_res is None) | (labels_res == target_res):
            sigma = [0.55] * n_dims
        else:
            sigma = [0.85 * labels_res[i] / target_res[i] for i in range(n_dims)]
        kernel = KL.Lambda(lambda x: tf.convert_to_tensor(add_axis(add_axis(gauss_kernel(sigma, n_dims), -1), -1),
                                                          dtype=x.dtype), name=f'gauss_kernel_{id}')(image)

    if n_channels == 1:
        image = KL.Lambda(lambda x: tf.nn.convolution(x[0], x[1], padding='SAME', strides=[1] * n_dims),
                          name=f'blur_image_{id}')([image, kernel])
        mask = KL.Lambda(lambda x: tf.where(tf.greater(x, 0), tf.ones_like(x, dtype='float32'),
                                            tf.zeros_like(x, dtype='float32')), name=f'masking_{id}')(labels)
        if not blur_background:
            blurred_mask = KL.Lambda(lambda x: tf.nn.convolution(x[0], x[1], padding='SAME', strides=[1] * n_dims),
                                     name=f'blur_mask_{id}')([mask, kernel])
            image = KL.Lambda(lambda x: x[0] / (x[1] + K.epsilon()), name=f'masked_blurring_{id}')([image, blurred_mask])
            bckgd_mean = KL.Lambda(lambda x: tf.random.uniform((1, 1), 0, 10), name=f'bckgd_mean_{id}')([])
            bckgd_std = KL.Lambda(lambda x: tf.random.uniform((1, 1), 0, 5), name=f'bckgd_std_{id}')([])
            rand_flip = KL.Lambda(lambda x: K.greater(tf.random.uniform((1, 1), 0, 1), 0.5), name=f'bool_{id}')([])
            bckgd_mean = KL.Lambda(lambda y: K.switch(y[0],
                                                      KL.Lambda(lambda x: tf.zeros_like(x))(y[1]),
                                                      y[1]), name=f'switch_backgd_mean_{id}')([rand_flip, bckgd_mean])
            bckgd_std = KL.Lambda(lambda y: K.switch(y[0],
                                                     KL.Lambda(lambda x: tf.zeros_like(x))(y[1]),
                                                     y[1]), name=f'switch_backgd_std_{id}')([rand_flip, bckgd_std])
            background = KL.Lambda(lambda x: x[1] + x[2] * tf.random.normal(tf.shape(x[0])),
                                   name=f'gaussian_bckgd_{id}')([image, bckgd_mean, bckgd_std])
            image = KL.Lambda(lambda x: tf.where(tf.cast(x[1], dtype='bool'), x[0], x[2]),
                              name=f'mask_blurred_image_{id}')([image, mask, background])
        else:
            rand_flip = KL.Lambda(lambda x: K.greater(tf.random.uniform((1, 1), 0, 1), 0.8), name=f'bool_{id}')([])
            image = KL.Lambda(lambda y: K.switch(y[0], KL.Lambda(
                lambda x: tf.where(tf.cast(x[1], dtype='bool'), x[0], tf.zeros_like(x[0])), name=f'mask_image_{id}')(
                [y[1], y[2]]), y[1]), name=f'switch_backgd_reset_{id}')([rand_flip, image, mask])

    else:
        # blur each image channel separately
        split = KL.Lambda(lambda x: tf.split(x, [1]*n_channels, axis=-1))(image)
        image = KL.Lambda(lambda x: tf.nn.convolution(x[0], x[1], padding='SAME', strides=[1] * n_dims),
                          name=f'blurring_0_{id}')([split[0], kernel])
        for i in range(1, n_channels):
            temp_blurred = KL.Lambda(lambda x: tf.nn.convolution(x[0], x[1], padding='SAME', strides=[1] * n_dims),
                                     name=f'blurring_{i}_{id}')([split[i], kernel])
            mask = KL.Lambda(lambda x: tf.where(tf.greater(x, 0), tf.ones_like(x, dtype='float32'),
                                                tf.zeros_like(x, dtype='float32')), name=f'masking_{i}_{id}')(labels)
            if not blur_background:
                blurred_mask = KL.Lambda(lambda x: tf.nn.convolution(x[0], x[1], padding='SAME', strides=[1] * n_dims),
                                         name=f'blur_mask_{i}_{id}')([mask, kernel])
                temp_blurred = KL.Lambda(lambda x: x[0] / (x[1]+K.epsilon()),
                                         name=f'masked_blurring_{i}_{id}')([temp_blurred, blurred_mask])
                bckgd_mean = KL.Lambda(lambda x: tf.random.uniform((1, 1), 0, 10), name=f'bckgd_mean_{i}_{id}')([])
                bckgd_std = KL.Lambda(lambda x: tf.random.uniform((1, 1), 0, 5), name=f'bckgd_std_{i}_{id}')([])
                rand_flip = KL.Lambda(lambda x: K.greater(tf.random.uniform((1, 1), 0, 1), 0.5), name=f'bool{i}_{id}')([])
                bckgd_mean = KL.Lambda(lambda y: K.switch(y[0],
                                                          KL.Lambda(lambda x: tf.zeros_like(x, dtype='float32'))(y[1]),
                                                          y[1]), name=f'switch_backgd_mean{i}_{id}')([rand_flip, bckgd_mean])
                bckgd_std = KL.Lambda(lambda y: K.switch(y[0],
                                                         KL.Lambda(lambda x: tf.zeros_like(x, dtype='float32'))(y[1]),
                                                         y[1]), name=f'switch_backgd_std_{i}_{id}')([rand_flip, bckgd_std])
                background = KL.Lambda(lambda x: x[1] + x[2] * tf.random.normal(tf.shape(x[0])),
                                       name=f'gaussian_bckgd_{i}_{id}')([temp_blurred, bckgd_mean, bckgd_std])
                temp_blurred = KL.Lambda(lambda x: tf.where(tf.cast(x[1], dtype='bool'), x[0], x[2]),
                                         name=f'mask_blurred_image_{i}_{id}')([temp_blurred, mask, background])
            else:
                rand_flip = KL.Lambda(lambda x: K.greater(tf.random.uniform((1, 1), 0, 1), 0.8), name=f'boo{i}_{id}')([])
                image = KL.Lambda(lambda y: K.switch(y[0], KL.Lambda(
                    lambda x: tf.where(tf.cast(x[1], dtype='bool'), x[0], tf.zeros_like(x[0])), name=f'mask_image_{i}_{id}')(
                    [y[1], y[2]]), y[1]), name=f'switch_backgd_reset_{i}_{id}')([rand_flip, image, mask])
            image = KL.Lambda(lambda x: tf.concat([x[0], x[1]], -1),
                              name=f'cat_blurring_{i}_{id}')([image, temp_blurred])

    # apply bias field
    if apply_bias_field:
        # format bias field and add it to inputs list
        bias_field_size = get_bias_field_shape(output_shape, bias_shape_factor)
        bias_field_in = KL.Input(shape=bias_field_size, name=f'bias_input_{id}')
        list_inputs.append(bias_field_in)
        # resize bias field and apply it to image
        zoom_fact = [o / d for o, d in zip(output_shape, bias_field_size)] 
        bias_field = nrn_layers.Resize(zoom_fact, interp_method='linear', name=f'log_bias_{id}')(bias_field_in)
        bias_field = KL.Lambda(lambda x: K.exp(x), name=f'bias_field_{id}')(bias_field)
        image._keras_shape = tuple(image.get_shape().as_list())
        bias_field._keras_shape = tuple(bias_field.get_shape().as_list())
        image = KL.multiply([bias_field, image])

    # make sure image's intensities are between 0 and 255
    image = KL.Lambda(lambda x: K.clip(x, 0, 255), name=f'clipping_{id}')(image)

    # contrast stretching
    image = KL.Lambda(
        lambda x: x * tf.random.uniform([1], minval=0.6, maxval=1.4) + tf.random.uniform([1], minval=-30, maxval=30),
        name=f'stretching_{id}')(image)

    # convert labels back to original values and remove unwanted labels
    if convert_back:
        out_lut = [x if x in segmentation_label_list else 0 for x in generation_label_list]
    else:
        # Rebase wanted indices into [0, N-1] for one-hot encoding.
        n = 0
        out_lut = [None] * len(generation_label_list)
        for i, x in enumerate(generation_label_list):
            out = -1
            if x in segmentation_label_list:
                out = n
                n += 1
            out_lut[i] = out
    labels = KL.Lambda(lambda x: tf.gather(tf.cast(out_lut, dtype='int32'),
                                           tf.cast(x, dtype='int32')), name=f'labels_back_{id}')(labels)

    # normalise the produced image (include labels_out, so this layer is not removed when plugging in other keras model)
    if normalise:
        m = KL.Lambda(lambda x: K.min(x), name=f'min_{id}')(image)
        M = KL.Lambda(lambda x: K.max(x), name=f'max_{id}')(image)
        image = KL.Lambda(lambda x: (x[0]-x[1])/(x[2]-x[1]), name=f'normalisation_{id}')([image, m, M])
    else:
        image = KL.Lambda(lambda x: x[0] + K.zeros(1), name=f'dummy_{id}')([image])

    # gamma augmentation
    image = KL.Lambda(lambda x: tf.math.pow(x[0], tf.math.exp(tf.random.normal([1], mean=0, stddev=0.25))),
                      name=f'gamma_{id}')([image, labels])

    outputs = [image, labels]
    if apply_nonlin_trans:
        outputs.append(vel_field)
    brain_model = keras.Model(inputs=list_inputs, outputs=outputs)
    return brain_model, def_field_size, bias_field_size
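An illustrative construction sketch; the label lists and shapes are placeholders, and the helper functions (get_shapes, get_nonlin_field_shape, get_bias_field_shape, ...) must be in scope:

import numpy as np

labels_shape = (160, 192, 224)
gen_labels = np.arange(20)  # placeholder generation label list
seg_labels = np.arange(20)  # placeholder segmentation label list
model, def_shape, bias_shape = labels_to_image_model(
    labels_shape, crop_shape=None,
    generation_label_list=gen_labels,
    segmentation_label_list=seg_labels)
# inputs: [label map, per-label means, per-label std devs, nonlinear field, bias field]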
Example #25
def diff_net(vol_size,
             enc_nf,
             dec_nf,
             int_steps=7,
             src_feats=1,
             indexing='ij',
             bidir=False,
             ret_flows=False,
             full_size=False,
             vel_resize=1 / 2,
             src=None,
             tgt=None):
    """
    diffeomorphic net, similar to miccai2018, but no sampling.

    architecture for probabilistic diffeomorphic VoxelMorph presented in the MICCAI 2018 paper. 
    You may need to modify this code (e.g., number of layers) to suit your project needs.

    The stationary velocity field operates in a space (0.5)^3 of vol_size for computational reasons.

    :param vol_size: volume size. e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters. right now it needs to be 1x4.
           e.g. [16,32,32,32]
    :param dec_nf: list of decoder filters. right now it must be 1x6, see unet function.
    :param int_steps: the number of integration steps
    :param indexing: xy or ij indexing. we recommend ij indexing if training from scratch. 
            miccai 2018 runs were done with xy indexing.
            **This param will be phased out (set to 'ij' behavior)**
    :return: the keras model
    """
    ndims = len(vol_size)
    assert ndims in [1, 2, 3], "ndims should be one of 1, 2, or 3. found: %d" % ndims

    # get unet
    unet_model = unet_core(vol_size,
                           enc_nf,
                           dec_nf,
                           full_size=full_size,
                           src=src,
                           tgt=tgt,
                           src_feats=src_feats)
    [src, tgt] = unet_model.inputs

    # velocity sample
    # unet_model.layers[-1].name = 'vel'
    # vel = unet_model.output
    x_out = unet_model.outputs[-1]

    # velocity mean and logsigma layers
    Conv = getattr(KL, 'Conv%dD' % ndims)
    vel = Conv(ndims,
               kernel_size=3,
               padding='same',
               kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5),
               name='flow')(x_out)

    if full_size and vel_resize != 1:
        vel = trf_resize(vel, 1.0 / vel_resize, name='flow-resize')

    # new implementation in neuron is cleaner.
    flow = nrn_layers.VecInt(method='ss', name='flow-int',
                             int_steps=int_steps)(vel)
    if bidir:
        # rev_z_sample = Lambda(lambda x: -x)(z_sample)
        neg_vel = Negate()(vel)
        neg_flow = nrn_layers.VecInt(method='ss',
                                     name='neg_flow-int',
                                     int_steps=int_steps)(neg_vel)

    # get up to final resolution
    flow = trf_resize(flow, vel_resize, name='diffflow')
    if bidir:
        neg_flow = trf_resize(neg_flow, vel_resize, name='neg_diffflow')

    # transform
    y = nrn_layers.SpatialTransformer(interp_method='linear',
                                      indexing=indexing,
                                      name='warped_src')([src, flow])
    if bidir:
        y_tgt = nrn_layers.SpatialTransformer(interp_method='linear',
                                              indexing=indexing,
                                              name='warped_tgt')(
                                                  [tgt, neg_flow])

    # prepare outputs and losses
    outputs = [y, vel]
    if bidir:
        outputs = [y, y_tgt, vel]

    model = Model(inputs=[src, tgt], outputs=outputs)

    if ret_flows:
        # note: the 'neg_diffflow' layer only exists when bidir=True
        outputs += [
            model.get_layer('diffflow').output,
            model.get_layer('neg_diffflow').output
        ]
        return Model(inputs=[src, tgt], outputs=outputs)
    else:
        return model
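A final construction sketch, assuming diff_net and its unet_core dependency are in scope:

model = diff_net((160, 192, 224), [16, 32, 32, 32],
                 [32, 32, 32, 32, 32, 16], bidir=True, ret_flows=True)
# outputs: [warped src, warped tgt, velocity, diffflow, neg_diffflow]
# note: ret_flows effectively requires bidir=True, since 'neg_diffflow' must exist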