Example #1
    def make_lstm_network(self):

        x0 = tf.keras.Input(shape=[None, 8, 8, self.num_channels])

        x = layers.ConvLSTM2D(1024, (3, 3),
                              strides=(2, 2),
                              padding='same',
                              activation='tanh',
                              use_bias=False,
                              unit_forget_bias=False,
                              return_sequences=True)(x0)

        x = layers.GlobalMaxPooling3D()(x)

        x = layers.Dense(self.num_classes,
                         activation='softmax',
                         name='predictions')(x)

        return tf.keras.Model(inputs=x0, outputs=x)
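
A minimal sketch of how this method might be exercised, assuming it is lifted to module scope; the stand-in host object and its `num_channels`/`num_classes` values below are illustrative assumptions, not part of the original:

import tensorflow as tf
from tensorflow.keras import layers

class _Cfg:
    num_channels = 3   # assumed: e.g. RGB frames
    num_classes = 10   # assumed: number of target classes

# call the method above with the stand-in as `self`
model = make_lstm_network(_Cfg())
model.summary()
# input:  (batch, timesteps, 8, 8, 3)
# output: (batch, 10) softmax scores; GlobalMaxPooling3D collapses the
# (time, 4, 4) dimensions left after the strided ConvLSTM2D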
Example #2
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           **kwargs):
    """Instantiates the ResNet, ResNetV2, and ResNeXt architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    # Arguments
        stack_fn: a function that returns output tensor for the
            stacked residual blocks.
        preact: whether to use pre-activation or not
            (True for ResNetV2, False for ResNet and ResNeXt).
        use_bias: whether to use biases for convolutional layers or not
            (True for ResNet and ResNetV2, False for ResNeXt).
        model_name: string, model name.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple. Since this variant uses
            3D convolutions, it should be a 4D volume shape such as
            `(depth, height, width, channels)` (with `channels_last`
            data format) or `(channels, depth, height, width)` (with
            `channels_first` data format).
        pooling: optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 5D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid input shape.
    """


    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # channels sit on the last axis of a 5D (batch, d, h, w, c) tensor
    bn_axis = 4 if backend.image_data_format() == 'channels_last' else 1

    x = layers.ZeroPadding3D(padding=3, name='conv1_pad')(img_input)
    x = layers.Conv3D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)

    if not preact:
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)

    x = layers.ZeroPadding3D(padding=1, name='pool1_pad')(x)
    x = layers.MaxPooling3D(3, strides=2, name='pool1_pool')(x)

    x = stack_fn(x)

    if preact:
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling3D(name='max_pool')(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    return model
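
A minimal smoke test of the constructor wiring, assuming `layers`, `models`, and `backend` are the `tensorflow.keras` modules the function refers to; the passthrough `identity_stack` is a placeholder for real residual stacks (e.g. chained `stack1`-style calls), not part of the original:

from tensorflow.keras import backend, layers, models

def identity_stack(x):
    return x  # stand-in for a real chain of residual stacks

model = ResNet(identity_stack,
               preact=False,
               use_bias=True,
               model_name='resnet3d_smoke',
               input_shape=(32, 224, 224, 3))
model.summary()  # stem + pooling only, ending in a 1000-way softmax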
Example #3
def buildModel():

    l_in = layers.Input((32, 32, 32, 13), name="Input")

    # first inception module
    l_ins_0 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in)

    l_ins_1 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in)
    l_ins_1 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(3, 3, 3))(l_ins_1)

    l_ins_2 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in)
    l_ins_2 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(5, 5, 5))(l_ins_2)

    l_ins_3 = layers.MaxPooling3D(pool_size=(3, 3, 3),
                                  strides=(1, 1, 1),
                                  padding='same')(l_in)
    l_ins_3 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_ins_3)

    l_ins = layers.Concatenate()([l_ins_0, l_ins_1, l_ins_2, l_ins_3])
    l_ins = layers.Dropout(rate=0.1)(l_ins)

    l_in_2 = layers.MaxPooling3D(pool_size=(3, 3, 3), strides=(1, 1, 1))(l_ins)
    l_in_2 = layers.BatchNormalization()(l_in_2)

    # second inception module
    l_ins_0 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in_2)

    l_ins_1 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in_2)
    l_ins_1 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(3, 3, 3))(l_ins_1)

    l_ins_2 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_in_2)
    l_ins_2 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(5, 5, 5))(l_ins_2)

    l_ins_3 = layers.MaxPooling3D(pool_size=(3, 3, 3),
                                  strides=(1, 1, 1),
                                  padding='same')(l_in_2)
    l_ins_3 = layers.Conv3D(filters=32,
                            padding='same',
                            activation='relu',
                            kernel_size=(1, 1, 1))(l_ins_3)

    l_ins2 = layers.Concatenate()([l_ins_0, l_ins_1, l_ins_2, l_ins_3, l_in_2])
    l_ins2 = layers.Dropout(rate=0.1)(l_ins2)

    l_enc = layers.GlobalMaxPooling3D()(l_ins2)

    # dense layers
    l_enc = layers.Dense(128, activation='relu')(l_enc)
    l_enc = layers.BatchNormalization()(l_enc)
    l_enc = layers.Dropout(rate=0.1)(l_enc)

    l_enc = layers.Dense(64, activation='relu')(l_enc)
    l_enc = layers.Dropout(rate=0.1)(l_enc)

    # single sigmoid output for the binary cross-entropy loss
    l_out = layers.Dense(1, activation='sigmoid')(l_enc)

    mdl = tf.keras.Model([l_in], l_out)
    mdl.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])

    mdl.summary()
    #plot_model(mdl, to_file='nvot.png', show_shapes= True);

    return mdl
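
Since `buildModel` compiles the model itself, a one-batch run on random voxels is enough to confirm the graph trains end to end; batch size and data below are arbitrary:

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers

model = buildModel()
x = np.random.rand(4, 32, 32, 32, 13).astype('float32')   # matches the Input layer
y = np.random.randint(0, 2, size=(4, 1)).astype('float32')  # binary targets
model.fit(x, y, epochs=1, verbose=0)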
Example #4
def mydensenet(blocks_in_dense=2,
               dense_conv_blocks=2,
               dense_layers=1,
               num_dense_connections=256,
               filters=16,
               growth_rate=16,
               reduction=0.5,
               dropout=0.5,
               output_bias=None,
               loss=None,
               channels=2,
               global_max=False,
               GN=True,
               **kwargs):
    """
    :param blocks_in_dense: how many convolution blocks are in a single size layer
    :param dense_conv_blocks: how many dense blocks before a max pooling to occur
    :param dense_layers: number of dense layers
    :param num_dense_connections:
    :param filters:
    :param growth_rate:
    :param kwargs:
    :return:
    """
    if output_bias is not None:
        output_bias = initializers.Constant(output_bias)
    blocks_in_dense = int(blocks_in_dense)
    dense_conv_blocks = int(dense_conv_blocks)
    dense_layers = int(dense_layers)
    num_dense_connections = int(num_dense_connections)
    filters = int(filters)
    growth_rate = int(growth_rate)
    reduction = float(reduction)
    dropout = float(dropout)
    input_shape = (32, 64, 64, channels)
    img_input = layers.Input(shape=input_shape)
    x = img_input
    inputs = (img_input, )

    x = layers.Conv3D(filters, (3, 7, 7),
                      strides=2,
                      use_bias=False,
                      name='conv1/conv',
                      padding='same')(x)

    for i in range(dense_conv_blocks):
        x = dense_block3d(x=x,
                          growth_rate=growth_rate,
                          blocks=blocks_in_dense,
                          name='conv{}'.format(i),
                          GN=GN)
        x = transition_block(x=x,
                             reduction=reduction,
                             name='pool{}'.format(i),
                             GN=GN)
    # x = layers.BatchNormalization(axis=-1, epsilon=1.001e-5, name='bn')(x)
    if GN:
        x = GroupNormalization(groups=2, axis=-1, name='gn')(x)
    else:
        x = layers.BatchNormalization(name='bn')(x)
    x = layers.Activation('selu', name='selu')(x)
    if global_max:
        x = layers.GlobalMaxPooling3D()(x)
    else:
        x = layers.MaxPooling3D(pool_size=(2, 2, 2),
                                name='final_max_pooling')(x)
        x = layers.Flatten()(x)

    for i in range(dense_layers):
        x = layers.Dense(num_dense_connections,
                         activation='selu',
                         kernel_regularizer=regularizers.l2(0.001))(x)
        if dropout != 0.0:
            x = layers.Dropout(dropout)(x)
    activation = 'softmax'
    out_channels = 2
    if loss == 'SigmoidFocal':
        activation = 'sigmoid'
        out_channels = 1
    x = layers.Dense(out_channels,
                     activation=activation,
                     name='prediction',
                     dtype='float32',
                     bias_initializer=output_bias)(x)
    model = Model(inputs=inputs, outputs=(x, ), name='my_3d_densenet')
    return model
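
Illustrative call only: `dense_block3d`, `transition_block`, `GroupNormalization`, and the `initializers`/`regularizers`/`Model` names are project-specific imports not shown above, so this sketch runs only inside that codebase:

# expects inputs of shape (batch, 32, 64, 64, channels)
model = mydensenet(blocks_in_dense=2,
                   dense_conv_blocks=2,
                   dense_layers=1,
                   num_dense_connections=256,
                   channels=2,
                   loss='SigmoidFocal')  # single sigmoid unit instead of 2-way softmax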
Example #5
def design_dnn(nb_features,
               input_shape,
               nb_levels,
               conv_size,
               nb_labels,
               feat_mult=1,
               pool_size=2,
               padding='same',
               activation='elu',
               final_layer='dense-sigmoid',
               conv_dropout=0,
               conv_maxnorm=0,
               nb_input_features=1,
               batch_norm=False,
               name=None,
               prefix=None,
               use_strided_convolution_maxpool=True,
               nb_conv_per_level=2):
    """
    "deep" cnn with dense or global max pooling layer @ end...

    Could use sequential...
    """
    def _global_max_nd(xtens):
        ytens = K.batch_flatten(xtens)
        return K.max(ytens, 1, keepdims=True)

    model_name = name
    if model_name is None:
        model_name = 'model_1'
    if prefix is None:
        prefix = model_name

    ndims = len(input_shape)
    input_shape = tuple(input_shape)

    convL = getattr(KL, 'Conv%dD' % ndims)
    maxpool = KL.MaxPooling3D if len(input_shape) == 3 else KL.MaxPooling2D
    if isinstance(pool_size, int):
        pool_size = (pool_size, ) * ndims

    # kwargs for the convolution layer
    conv_kwargs = {'padding': padding, 'activation': activation}
    if conv_maxnorm > 0:
        conv_kwargs['kernel_constraint'] = maxnorm(conv_maxnorm)

    # initialize a dictionary
    enc_tensors = {}

    # first layer: input
    name = '%s_input' % prefix
    enc_tensors[name] = KL.Input(shape=input_shape + (nb_input_features, ),
                                 name=name)
    last_tensor = enc_tensors[name]

    # down arm:
    # add nb_levels of (conv + activation) blocks, downsampling after each level
    for level in range(nb_levels):
        for conv in range(nb_conv_per_level):
            if conv_dropout > 0:
                name = '%s_dropout_%d_%d' % (prefix, level, conv)
                enc_tensors[name] = KL.Dropout(conv_dropout)(last_tensor)
                last_tensor = enc_tensors[name]

            name = '%s_conv_%d_%d' % (prefix, level, conv)
            nb_lvl_feats = np.round(nb_features * feat_mult**level).astype(int)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      conv_size,
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]

        # downsample, either with a strided convolution or a max pool
        if use_strided_convolution_maxpool:
            name = '%s_strided_conv_%d' % (prefix, level)
            enc_tensors[name] = convL(nb_lvl_feats,
                                      pool_size,
                                      strides=pool_size,
                                      **conv_kwargs,
                                      name=name)(last_tensor)
            last_tensor = enc_tensors[name]
        else:
            name = '%s_maxpool_%d' % (prefix, level)
            enc_tensors[name] = maxpool(pool_size=pool_size,
                                        name=name,
                                        padding=padding)(last_tensor)
            last_tensor = enc_tensors[name]

    # dense layer
    if final_layer == 'dense-sigmoid':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name,
                                     activation="sigmoid")(last_tensor)

    elif final_layer == 'dense-tanh':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(1, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # Omitting BatchNorm for now, it seems to have a cpu vs gpu problem
        # https://github.com/tensorflow/tensorflow/pull/8906
        # https://github.com/fchollet/keras/issues/5802
        # name = '%s_bn' % prefix
        # enc_tensors[name] = KL.BatchNormalization(axis=batch_norm, name=name)(last_tensor)
        # last_tensor = enc_tensors[name]

        name = '%s_tanh' % prefix
        enc_tensors[name] = KL.Activation(activation="tanh",
                                          name=name)(last_tensor)

    elif final_layer == 'dense-softmax':

        name = "%s_flatten" % prefix
        enc_tensors[name] = KL.Flatten(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_dense' % prefix
        enc_tensors[name] = KL.Dense(nb_labels,
                                     name=name,
                                     activation="softmax")(last_tensor)

    # global max pooling layer
    elif final_layer == 'myglobalmaxpooling':

        name = '%s_batch_norm' % prefix
        enc_tensors[name] = KL.BatchNormalization(axis=batch_norm,
                                                  name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.Lambda(_global_max_nd, name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool_reshape' % prefix
        enc_tensors[name] = KL.Reshape((1, 1), name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # cannot easily apply the activation inside the Lambda layer, so use an extra layer
        name = '%s_global_max_pool_sigmoid' % prefix
        enc_tensors[name] = KL.Conv1D(1,
                                      1,
                                      name=name,
                                      activation="sigmoid",
                                      use_bias=True)(last_tensor)

    elif final_layer == 'globalmaxpooling':

        name = '%s_conv_to_featmaps' % prefix
        enc_tensors[name] = KL.Conv3D(2, 1, name=name,
                                      activation="relu")(last_tensor)
        last_tensor = enc_tensors[name]

        name = '%s_global_max_pool' % prefix
        enc_tensors[name] = KL.GlobalMaxPooling3D(name=name)(last_tensor)
        last_tensor = enc_tensors[name]

        # cannot easily apply the activation inside the Lambda layer, so use an extra layer
        name = '%s_global_max_pool_softmax' % prefix
        enc_tensors[name] = KL.Activation('softmax', name=name)(last_tensor)

    last_tensor = enc_tensors[name]

    # create the model
    model = Model(inputs=[enc_tensors['%s_input' % prefix]],
                  outputs=[last_tensor],
                  name=model_name)
    return model
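
A minimal instantiation, assuming the module-level aliases the function relies on (`KL` for the Keras layers module, `K` for the backend, `np`, `maxnorm`, `Model`) are imported as in the original codebase:

model = design_dnn(nb_features=8,
                   input_shape=(32, 32, 32),   # 3D volume, so Conv3D is selected
                   nb_levels=3,
                   conv_size=3,
                   nb_labels=2,
                   final_layer='dense-softmax')
model.summary()  # input: (batch, 32, 32, 32, 1); output: (batch, 2) softmax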
Example #6
    def __init__(self,
                 inshape,
                 nb_labels,
                 nb_unet_features=None,
                 init_mu=None,
                 init_sigma=None,
                 warp_atlas=True,
                 stat_post_warp=True,
                 stat_nb_feats=16,
                 network_stat_weight=0.001,
                 **kwargs):
        """ 
        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_labels: Number of labels in probabilistic atlas.
            nb_unet_features: Unet convolutional features. See VxmDense documentation for more information.
            init_mu: Optional initialization for gaussian means. Default is None.
            init_sigma: Optional initialization for gaussian sigmas. Default is None.
            warp_atlas: Whether to warp the atlas to the image before computing stats. Default is True.
            stat_post_warp: Computes gaussian stats using the warped atlas. Default is True.
            stat_nb_feats: Number of features in the stats convolutional layer. Default is 16.
            network_stat_weight: Relative weight of the stats learned by the network. Default is 0.001.
            kwargs: Forwarded to the internal VxmDense model.
        """

        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [1, 2, 3], 'ndims should be one of 1, 2, or 3. found: %d' % ndims

        # build warp network
        vxm_model = VxmDense(inshape,
                             nb_unet_features=nb_unet_features,
                             src_feats=nb_labels,
                             **kwargs)

        # extract necessary layers from the network
        # important to note that we're warping the atlas to the image in this case and
        # we'll swap the input order later
        atlas, image = vxm_model.inputs
        warped_atlas = vxm_model.references.y_source if warp_atlas else atlas
        flow = vxm_model.references.pos_flow

        # compute stat using the warped atlas (or not)
        if stat_post_warp:
            assert warp_atlas, 'must enable warp_atlas if computing stat post warp'
            combined = KL.concatenate([warped_atlas, image],
                                      name='post_warp_concat')
        else:
            # use last convolution in the unet before the flow convolution
            combined = vxm_model.references.unet_model.layers[-2].output

        # convolve into nlabel-stat volume
        conv = _conv_block(combined, stat_nb_feats)
        conv = _conv_block(conv, nb_labels)

        Conv = getattr(KL, 'Conv%dD' % ndims)
        weaknorm = KI.RandomNormal(mean=0.0, stddev=1e-5)

        # convolve into mu and sigma volumes
        stat_mu_vol = Conv(nb_labels,
                           kernel_size=3,
                           name='mu_vol',
                           kernel_initializer=weaknorm,
                           bias_initializer=weaknorm)(conv)
        stat_logssq_vol = Conv(nb_labels,
                               kernel_size=3,
                               name='logsigmasq_vol',
                               kernel_initializer=weaknorm,
                               bias_initializer=weaknorm)(conv)

        # pool to get 'final' stat
        stat_mu = KL.GlobalMaxPooling3D(name='mu_pooling')(stat_mu_vol)
        stat_logssq = KL.GlobalMaxPooling3D(
            name='logssq_pooling')(stat_logssq_vol)

        # combine mu with initialization
        if init_mu is not None:
            init_mu = np.array(init_mu)
            stat_mu = KL.Lambda(lambda x: network_stat_weight * x + init_mu,
                                name='comb_mu')(stat_mu)

        # combine sigma with initialization
        if init_sigma is not None:
            init_logsigmasq = np.array([2 * np.log(f) for f in init_sigma])
            stat_logssq = KL.Lambda(
                lambda x: network_stat_weight * x + init_logsigmasq,
                name='comb_sigma')(stat_logssq)

        # unnormalized log-likelihood
        # (tf.distributions is the TF1 API; tensorflow_probability's
        # distributions.Normal is the TF2 equivalent)
        def unnorm_loglike(I, mu, logsigmasq, use_log=True):
            P = tf.distributions.Normal(mu, K.exp(logsigmasq / 2))
            return P.log_prob(I) if use_log else P.prob(I)

        uloglhood = KL.Lambda(lambda x: unnorm_loglike(*x),
                              name='unsup_likelihood')(
                                  [image, stat_mu, stat_logssq])

        # compute data loss as a layer, because it's a bit easier than outputting a ton of things
        def logsum(prob_ll, atl):
            # safe computation using the log sum exp trick (NOTE: this does not normalize p)
            # https://www.xarg.org/2016/06/the-log-sum-exp-trick-in-machine-learning
            logpdf = prob_ll + K.log(atl + K.epsilon())
            alpha = tf.reduce_max(logpdf, -1, keepdims=True)
            return alpha + tf.math.log(
                tf.reduce_sum(K.exp(logpdf - alpha), -1, keepdims=True) +
                K.epsilon())

        loss_vol = KL.Lambda(lambda x: logsum(*x))([uloglhood, warped_atlas])

        # initialize the keras model
        super().__init__(inputs=[image, atlas], outputs=[loss_vol, flow])

        # cache pointers to layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.vxm_model = vxm_model
        self.references.uloglhood = uloglhood
        self.references.stat_mu = stat_mu
        self.references.stat_logssq = stat_logssq
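
This `__init__` appears to belong to a `LoadableModel` subclass in the voxelmorph codebase (`VxmDense`, `_conv_block`, and the `KL`/`KI`/`K` aliases come from there), so the construction below is a hedged illustration rather than standalone code:

# hypothetical usage; ProbAtlasSegmentation is the wrapping class in voxelmorph
model = ProbAtlasSegmentation((160, 192, 224), nb_labels=30)
# inputs:  [image, atlas]
# outputs: [loss_vol, flow], the log-likelihood volume and the warp field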
Example #7
from sklearn.metrics import classification_report, confusion_matrix

print('desired shape')
print((z_max-z_min, y_max-y_min, x_max-x_min, 1 + len(atom_type) + len(atom_pos)))
# Create the discriminator
discriminator = keras.Sequential(
    [
        keras.Input(shape=(z_max-z_min, y_max-y_min, x_max-x_min, 1 + len(atom_type) + len(atom_pos))),
        layers.Conv3D(64, (3, 3, 3), strides=(2, 2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.Conv3D(128, (3, 3, 3), strides=(2, 2, 2), padding="same"),
        layers.LeakyReLU(alpha=0.2),
        layers.GlobalMaxPooling3D(),
        layers.Dense(1),
    ],
    name="discriminator",
)
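
A quick shape check on random voxels; `z_max`/`z_min`, `y_max`/`y_min`, `x_max`/`x_min`, `atom_type`, and `atom_pos` are defined earlier in the script and assumed here:

import numpy as np

sample = np.random.rand(2,
                        z_max - z_min,
                        y_max - y_min,
                        x_max - x_min,
                        1 + len(atom_type) + len(atom_pos)).astype('float32')
print(discriminator(sample).shape)  # (2, 1): one raw logit per sample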

# Create the generator
latent_dim = 1 + len(atom_type) + len(atom_pos)
generator = keras.Sequential(
    [
        keras.Input(shape=(latent_dim,)),
        layers.Dense((z_max - z_min) * (y_max - y_min) * (x_max - x_min) * latent_dim),
        layers.LeakyReLU(alpha=0.2),
        layers.Reshape((z_max-z_min, y_max-y_min, x_max-x_min, latent_dim)),
        layers.Conv3DTranspose(latent_dim, (4, 4, 4), strides=(1, 1, 1), padding="same"),