Example #1
def createDeconvNetwork():
    model = models.Sequential()
    model.add(layers.Conv3D(64, (2, 3, 3), input_shape=(6, 400, 400, 1)))
    model.add(layers.Conv3D(64, (1, 3, 3)))
    model.add(layers.MaxPooling3D((1, 2, 2)))
    model.add(layers.Conv3D(128, (2, 3, 3)))
    model.add(layers.Conv3D(128, (1, 3, 3)))
    model.add(layers.MaxPooling3D((1, 2, 2)))
    model.add(layers.Conv3D(256, (2, 3, 3)))
    model.add(layers.Conv3D(256, (1, 4, 4)))
    model.add(layers.MaxPooling3D((1, 2, 2)))
    model.add(layers.Conv3D(512, (2, 3, 3)))
    model.add(layers.Conv3D(512, (1, 3, 3)))
    model.add(layers.MaxPooling3D((1, 2, 2)))
    model.add(layers.Conv3D(1024, (2, 3, 3)))
    model.add(layers.Conv3D(1024, (1, 4, 4)))
    model.add(layers.MaxPooling3D((1, 2, 2)))

    model.add(layers.Reshape((8, 8, 1024)))

    model.add(layers.Conv2D(1024, (3, 3)))
    model.add(layers.Conv2DTranspose(3, (15, 15)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(3, (3, 3)))
    model.add(layers.Reshape((8 * 8, 3)))
    model.add(layers.Conv1D(3, (15)))

    return model
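The snippet above omits its imports; a minimal usage sketch, assuming the usual tf.keras modules, follows.

# Minimal usage sketch; the import below is an assumption, since the original snippet omits it.
from tensorflow.keras import layers, models

model = createDeconvNetwork()
model.summary()  # expects input batches of shape (batch, 6, 400, 400, 1)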
Example #2
def build_model(input_shape, target_size, dropout=0):
    """Construct the CosmoFlow 3D CNN model"""

    conv_args = dict(kernel_size=2, padding='valid')

    model = tf.keras.models.Sequential([
        layers.Conv3D(16, input_shape=input_shape, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Conv3D(16, **conv_args),
        layers.LeakyReLU(),
        layers.MaxPool3D(pool_size=2),
        layers.Flatten(),
        layers.Dropout(dropout),
        layers.Dense(128),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        layers.Dense(64),
        layers.LeakyReLU(),
        layers.Dropout(dropout),
        layers.Dense(target_size, activation='tanh'),
        layers.Lambda(scale_1p2)
    ])

    return model
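The final Lambda layer references scale_1p2, which the snippet never defines; a plausible sketch follows, where the 1.2 factor is only a guess from the name and the input shape is illustrative.

import tensorflow as tf
from tensorflow.keras import layers

def scale_1p2(x):
    # Hypothetical helper assumed by build_model: widen the tanh output from [-1, 1] to [-1.2, 1.2].
    return x * 1.2

model = build_model(input_shape=(128, 128, 128, 4), target_size=4, dropout=0.5)
model.summary()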
Example #3
 def __init__(self):
     super(VNetOutBlock, self).__init__()             
     self.final = layers.Conv3D(filters=2, kernel_size=(1,1,1), strides=1, 
                                      padding='valid', kernel_initializer='he_normal', activation='relu')
     
     self.binary = layers.Conv3D(filters=1, kernel_size=(1,1,1), strides=1, 
                                      padding='valid', kernel_initializer='he_normal', activation='sigmoid')
Example #4
def first_block(layer_in, f1N2, f3):
    merge_input = layer_in
    
    merge_input = layers.Conv3D(f3, (1,1,1), strides=(2,2,2), kernel_initializer='he_normal')(merge_input)
    merge_input = layers.BatchNormalization()(merge_input)
    
    #conv1
    conv1 = layers.Conv3D(f1N2, (1,1,1), strides=(2,2,2), kernel_initializer='he_normal')(layer_in)
    conv1 = layers.BatchNormalization()(conv1)
    conv1 = layers.Activation('relu')(conv1)
    
    # conv2
    conv2 = layers.Conv3D(f1N2, (3,3,3), padding='same', kernel_initializer='he_normal')(conv1)
    conv2 = layers.BatchNormalization()(conv2)
    conv2 = layers.Activation('relu')(conv2)
    
    #conv3
    conv3 = layers.Conv3D(f3, (1,1,1), padding='same', kernel_initializer='he_normal')(conv2)
    conv3 = layers.BatchNormalization()(conv3)
    
    # add filters, assumes filters/channels last
    layer_out = layers.Add()([conv3, merge_input])
    # activation function
    layer_out = layers.Activation('relu')(layer_out)
    return layer_out
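A short functional-API sketch showing how first_block might be wired up; the shapes, filter counts, and imports are illustrative assumptions.

from tensorflow.keras import layers, Input, Model

inputs = Input(shape=(16, 64, 64, 32))    # (depth, height, width, channels), illustrative
x = first_block(inputs, f1N2=32, f3=128)  # all three spatial dims are halved by the strided 1x1x1 convs
model = Model(inputs, x)
model.summary()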
Example #5
 def __init__(self,
              inplanes1,
              outplanes1,
              outplanes2,
              kernel=3,
              activation=None,
              **kwargs):
     if kwargs.get("padding") and kwargs["padding"].upper() != "SAME":
         raise NotImplementedError("Only implemented for padding 'SAME'")
     kwargs["padding"] = "SAME"
     super(BasicBlock, self).__init__()
     self.conv1 = layers.Conv3D(outplanes1,
                                kernel_size=kernel,
                                dilation_rate=2,
                                padding="SAME")
     # First convolution layer of the residual block
     self.bn1 = layers.BatchNormalization()
     # Batch-normalize the output of the convolution layer
     self.relu = layers.ReLU()
     # Apply the ReLU activation after normalization
     self.conv2 = layers.Conv3D(outplanes2,
                                kernel_size=kernel,
                                dilation_rate=2,
                                padding="SAME")
     # Second convolution layer of the residual block
     self.bn2 = layers.BatchNormalization()
     # Batch-normalize the output of the convolution layer
     print("test_point")
     if inplanes1 == outplanes2:
         self.downsample = lambda x: x
     else:
         self.downsample = Sequential()
         self.downsample.add(layers.Conv3D(outplanes2, kernel_size=1))
         self.downsample.add(layers.BatchNormalization())
Example #6
    def __init__(self, filter_num, name, stride=1, **kwargs):
        super(BasicBlock, self).__init__(**kwargs)
        self.filter_num = filter_num
        self.stride = stride
        self.layers = []
        self.conv1 = layers.Conv3D(filter_num, (2, 3, 3),
                                   strides=(1, stride, stride),
                                   padding='same',
                                   name=name + '_1')
        # self.bn1=layers.BatchNormalization()
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv3D(filter_num, (2, 3, 3),
                                   strides=1,
                                   padding='same',
                                   name=name + '_2')
        # self.bn2 = layers.BatchNormalization()
        self.layers.append(self.conv1)
        self.layers.append(self.conv2)
        # self.layers.append(self.bn1)
        # self.layers.append(self.bn2)
        if stride != 1:
            self.downsample = models.Sequential()
            self.downsample.add(
                layers.Conv3D(filter_num, (1, 1, 1),
                              strides=(1, stride, stride)))
            self.layers.append(self.downsample)
        else:
            self.downsample = lambda x: x
Example #7
    def up_conv_block(self, m, prev, filters_a, filters_b):
        """3D up-convolution block."""
        m = layers.Conv3DTranspose(
            filters_a,
            self.transpose_kernel_size,
            strides=(2, 2, 1),
            padding="same",
            activation=self.activation,
        )(m)
        m = layers.BatchNormalization()(m)

        m = layers.Concatenate()([m, prev])

        m = layers.Conv3D(filters_b,
                          self.kernel_size,
                          padding="same",
                          activation=self.activation)(m)
        m = layers.BatchNormalization()(m)

        m = layers.Conv3D(filters_b,
                          self.kernel_size,
                          padding="same",
                          activation=self.activation)(m)
        m = layers.BatchNormalization()(m)

        return m
Example #8
def create_block_components(names=None, dims=2):

    # --- padding == same, z-size == 1
    kwargs_z1 = {
        'kernel_size': (1, 3, 3) if dims == 2 else (3, 3, 3),
        'padding': 'same',
        'kernel_initializer': 'he_normal'}

    # --- padding = valid, z-size == 2
    kwargs_z2 = {
        'kernel_size': (2, 1, 1),
        'padding': 'valid',
        'kernel_initializer': 'he_normal'}

    # --- padding = valid, z-size == 2
    kwargs_z3 = {
        'kernel_size': (3, 1, 1),
        'padding': 'valid',
        'kernel_initializer': 'he_normal'}

    # --- Define block components
    conv_z1 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z1)(x)
    conv_z2 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z2)(x)
    conv_z3 = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs_z3)(x)
    tran_z1 = lambda x, filters, strides : layers.Conv3DTranspose(filters=filters, strides=strides, **kwargs_z1)(x)
    conv_fc = lambda x, filters : (x)

    norm = lambda x : layers.BatchNormalization()(x)
    relu = lambda x : layers.LeakyReLU()(x)

    # --- Return local vars
    names = names or ('conv_z1', 'conv_z2', 'conv_z3', 'tran_z1', 'conv_fc', 'norm', 'relu')
    lvars = locals()

    return [lvars.get(n) for n in names] 
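A usage sketch for the returned closures; the input shape and the imports are illustrative assumptions.

from tensorflow.keras import layers, Input

conv_z1, conv_z2, conv_z3, tran_z1, conv_fc, norm, relu = create_block_components(dims=2)

x = Input(shape=(8, 64, 64, 1))                      # (slices, height, width, channels), illustrative
y = relu(norm(conv_z1(x, filters=32, strides=1)))    # conv -> batch norm -> leaky ReLU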
Example #9
  def __init__(self, out_shape, strides=1, ksize = 3, shortcut = False):
    super(ResBlock_generator, self).__init__()
    self.shortcut = shortcut
    
    # self.upSample = layers.UpSampling3D()
    self.conv_0 = layers.Conv3DTranspose(out_shape, kernel_size=ksize, strides=2, padding='same', name='rg_conv1', use_bias=False)
    self.bn_0 = layers.BatchNormalization()
    self.PRelu0 = layers.LeakyReLU(name='G_LeakyReLU1')
    self.conv_1 = layers.Conv3D(out_shape, kernel_size=ksize, strides=1, padding='same', name='rg_conv2', use_bias=False)
    self.bn_1 = layers.BatchNormalization()
    self.PRelu1 = layers.LeakyReLU(name='G_LeakyReLU2')
    self.conv_2 = layers.Conv3D(out_shape, kernel_size=ksize, strides=1, padding='same', name='rg_conv3', use_bias=False)
    self.bn_2 = layers.BatchNormalization()
    self.PRelu2 = layers.LeakyReLU(name='G_LeakyReLU3')
    self.conv_3 = layers.Conv3D(out_shape, kernel_size=ksize, strides=1, padding='same', name='rg_conv4', use_bias=False)
    
    self.bn_3 = layers.BatchNormalization()
    

    if shortcut:
      # self.upSample_shortcut = layers.UpSampling3D()
      self.conv_shortcut = layers.Conv3DTranspose(out_shape, kernel_size=1, strides=2, padding='same', use_bias=False)
      

    self.PRelu3 = layers.LeakyReLU(name='G_LeakyReLU4')
Example #10
def feature_pyramid_3d(inputs, filter_ratio):
    kwargs1 = {
        'kernel_size': (1, 1, 1),
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': (1, 3, 3),
        'padding': 'same',
    }
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    add = lambda x, y: layers.Add()([x, y])
    upsamp2x = lambda x: layers.UpSampling3D(size=(1, 2, 2))(x)
    fp_block = lambda x, y: add(upsamp2x(x),
                                conv1(y, int(256 * filter_ratio), strides=1))
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)

    p5 = conv1(inputs[2], int(256 * filter_ratio), strides=1)
    fp4 = fp_block(p5, inputs[1])
    p4 = conv3(fp4, int(256 * filter_ratio), strides=1)
    fp3 = fp_block(fp4, inputs[0])
    p3 = conv3(fp3, int(256 * filter_ratio), strides=1)
    p6 = conv3(p5, int(256 * filter_ratio), strides=(1, 2, 2))
    p7 = conv3(relu(p6), int(256 * filter_ratio), strides=(1, 2, 2))
    return [p3, p4, p5, p6, p7]
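A usage sketch feeding three backbone feature maps at successively halved resolution; shapes and imports are illustrative assumptions.

from tensorflow.keras import layers, Input

# inputs[0] is the finest feature map and inputs[2] the coarsest, matching how the function indexes them.
c3 = Input(shape=(1, 64, 64, 128))
c4 = Input(shape=(1, 32, 32, 256))
c5 = Input(shape=(1, 16, 16, 512))
p3, p4, p5, p6, p7 = feature_pyramid_3d([c3, c4, c5], filter_ratio=0.5)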
Example #11
def regression_head(A, filter_ratio=1, tanh=False):
    kwargs3 = {
        'kernel_size': (1, 3, 3),
        'padding': 'same',
    }
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    # added tanh activation
    conv3_final = lambda x, filters, strides: layers.Conv3D(filters=filters,
                                                            strides=strides,
                                                            activation=
                                                            activations.tanh,
                                                            **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv_class1 = lambda filters, x: relu(conv3(x, filters, strides=1))

    inputs = Input(shape=(None, None, None, int(256 * filter_ratio)))
    b1 = conv_class1(
        int(256 * filter_ratio),
        conv_class1(
            int(256 * filter_ratio),
            conv_class1(int(256 * filter_ratio),
                        conv_class1(int(256 * filter_ratio), inputs))))
    # changed to use tanh activation
    if tanh:
        b2 = conv3_final(b1, 4 * A, strides=1)
    else:
        b2 = conv3(b1, 4 * A, strides=1)

    box_subnet = Model(inputs=inputs, outputs=b2)
    return box_subnet
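A usage sketch; the snippet assumes Input, Model, layers and activations from tf.keras, and A is the number of anchors per location.

from tensorflow.keras import layers, activations, Input, Model

box_subnet = regression_head(A=9, filter_ratio=1)  # 4 * 9 = 36 regression outputs per spatial location
box_subnet.summary()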
Example #12
def Unet3D_MultiDecorders(shape, num_class=1, filters=32, model_depth=3, activation='sigmoid', pooling='max', layer_act='relu', bn_axis=-1):    
    nb_filter = [filters*2**i for i in range(5)]
    img_input = Input(shape=shape, name='main_input')

    conv1_1 = standard_unit(img_input, stage=model_depth-3, nb_filter=nb_filter[model_depth-3], layer_act=layer_act)
    pool1 = pooling_unit(conv1_1, stage=model_depth-3, pooling=pooling)

    conv2_1 = standard_unit(pool1, stage=model_depth-2, nb_filter=nb_filter[model_depth-2], layer_act=layer_act)
    pool2 = pooling_unit(conv2_1, stage=model_depth-2, pooling=pooling)

    conv3_1 = standard_unit(pool2, stage=model_depth-1, nb_filter=nb_filter[model_depth-1], layer_act=layer_act)
    pool3 = pooling_unit(conv3_1, stage=model_depth-1, pooling=pooling)

    conv4_1 = standard_unit(pool3, stage=model_depth, nb_filter=nb_filter[model_depth], layer_act=layer_act)

    conv3_2 = up_unit(conv4_1, conv3_1, stage=model_depth-1, nb_filter=nb_filter[model_depth-1], layer_act=layer_act)                             
    conv2_2 = up_unit(conv3_2, conv2_1, stage=model_depth-2, nb_filter=nb_filter[model_depth-2], layer_act=layer_act)                             
    conv1_2 = up_unit(conv2_2, conv1_1, stage=model_depth-3, nb_filter=nb_filter[model_depth-3], layer_act=layer_act)

    conv3_3 = up_unit(conv4_1, conv3_1, stage=model_depth-1, nb_filter=nb_filter[model_depth-1], layer_act=layer_act)                             
    conv2_3 = up_unit(conv3_3, conv2_1, stage=model_depth-2, nb_filter=nb_filter[model_depth-2], layer_act=layer_act)                             
    conv1_3 = up_unit(conv2_3, conv1_1, stage=model_depth-3, nb_filter=nb_filter[model_depth-3], layer_act=layer_act)
    
    output1 = layers.Conv3D(num_class, (1, 1, 1), activation=activation, name='output1', kernel_initializer='he_normal', padding='same')(conv1_2)
    output2 = layers.Conv3D(num_class, (1, 1, 1), activation=activation, name='output2', kernel_initializer='he_normal', padding='same')(conv1_3)

    return Model(inputs=img_input, outputs=[output1,output2])
Example #13
def make_discriminator_model():

    inputshape = (DIMENSIONS[0], DIMENSIONS[1], DIMENSIONS[2], CHANNELS)

    model = tf.keras.Sequential()

    model.add(
        layers.Conv3D(CHANNELS, (5, 5, 5),
                      strides=(2, 2, 2),
                      padding='same',
                      input_shape=inputshape))
    #TODO: Fill in input_shape above

    model.add(layers.LeakyReLU())
    #TODO: Add dropout here
    model.add(layers.Dropout(0.3))

    model.add(
        layers.Conv3D(CHANNELS * 2, (5, 5, 5),
                      strides=(2, 2, 2),
                      padding='same'))
    model.add(layers.LeakyReLU())
    #TODO: Add dropout here
    model.add(layers.Dropout(0.3))

    model.add(layers.Flatten())
    #TODO: Add final Dense layer here - how many outputs?
    model.add(layers.Dense(1, activation='sigmoid'))

    return model
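DIMENSIONS and CHANNELS are not defined in the snippet; a minimal sketch, assuming a 32x32x32 single-channel volume, follows.

import tensorflow as tf
from tensorflow.keras import layers

DIMENSIONS = (32, 32, 32)  # assumed spatial extent of the input volume
CHANNELS = 1               # assumed number of input channels

disc = make_discriminator_model()
disc.summary()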
Example #14
def conv_block_3d(x, growth_rate, name):
    """A building block for a dense block.

    Arguments:
    x: input tensor.
    growth_rate: float, growth rate at dense layers.
    name: string, block label.

    Returns:
    Output tensor for the block.
    """
    x1 = layers.BatchNormalization(axis=-1,
                                   epsilon=1.001e-5,
                                   name=name + '_0_bn')(x)
    x1 = layers.Activation('relu', name=name + '_0_relu')(x1)
    x1 = layers.Conv3D(4 * growth_rate,
                       1,
                       use_bias=False,
                       name=name + '_1_conv',
                       padding='same')(x1)
    x1 = layers.BatchNormalization(axis=-1,
                                   epsilon=1.001e-5,
                                   name=name + '_1_bn')(x1)
    x1 = layers.Activation('relu', name=name + '_1_relu')(x1)
    x1 = layers.Conv3D(growth_rate, (3, 1, 1),
                       padding='same',
                       use_bias=False,
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=-1, name=name + '_concat')([x, x1])
    return x
Example #15
def create_voxnet_model_small(input_shape, output_size):
    """
    Creates a small VoxNet.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """

    #Trainable params: 301,378
    model = models.Sequential(name="C7-F32-P2-C5-F64-P2-D512")
    model.add(
        layers.Reshape(target_shape=input_shape + (1, ),
                       input_shape=input_shape))
    model.add(layers.Conv3D(32, (7, 7, 7), activation="relu"))
    model.add(layers.MaxPooling3D((4, 4, 4)))
    model.add(layers.Conv3D(64, (5, 5, 5), activation="relu"))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(512, activation="relu"))
    model.add(layers.Dense(output_size))

    return model
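A usage sketch for a 32x32x32 occupancy grid with 10 output classes; shapes, class count, and compile settings are illustrative.

import tensorflow as tf
from tensorflow.keras import layers, models

model = create_voxnet_model_small(input_shape=(32, 32, 32), output_size=10)
model.compile(optimizer="adam",
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=["accuracy"])  # the final Dense layer has no activation, so train from logits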
Example #16
    def __init__(self, language_model: seq2seq.Seq2Seq = None):
        """Initialize the architecture."""
        super().__init__(NAME)

        if language_model is None:
            self.language_model = seq2seq.Gru()
        else:
            self.language_model = language_model

        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        self.flatten = layers.Flatten()
        self.max_pool = layers.MaxPooling3D((1, 2, 2))

        self.conv1 = layers.Conv3D(64,
                                   kernel_size=(1, 3, 3),
                                   padding="same",
                                   activation="relu")
        self.conv2 = layers.Conv3D(128,
                                   kernel_size=(1, 3, 3),
                                   padding="same",
                                   activation="relu")
        self.conv3 = layers.Conv3D(128,
                                   kernel_size=(1, 3, 3),
                                   padding="same",
                                   activation="relu")

        self.d1 = layers.Dense(512, activation="relu")
        self.d2 = layers.Dense(256, activation="relu")
        self.d3 = layers.Dense(4)
Example #17
def create_voxnet_model_homepage(input_shape, output_size):
    """
    Creates a small VoxNet.

    See: http://dimatura.net/publications/3dcnn_lz_maturana_scherer_icra15.pdf

    Note: This is the latest model that the VoxNet-authors used.

    Args:
        input_shape (shape): Input-shape.
        output_size (int): Output-size.

    Returns:
        Model: A model.
    """

    # Trainable params: 916,834
    model = models.Sequential(name="VoxNetHomepage")
    model.add(
        layers.Reshape(target_shape=input_shape + (1, ),
                       input_shape=input_shape))
    model.add(
        layers.Conv3D(32, (5, 5, 5), strides=(2, 2, 2), activation="relu"))
    model.add(
        layers.Conv3D(32, (3, 3, 3), strides=(1, 1, 1), activation="relu"))
    model.add(layers.MaxPooling3D((2, 2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dense(128, activation="relu"))
    model.add(layers.Dense(output_size))

    return model
Example #18
        def CNN_model(
            Width,
            Height,
        ):
            #nb_filters = 64
            nb_conv = 5

            model = models.Sequential()
            # Convlolutional layers
            model.add(
                layers.Conv3D(48,
                              kernel_size=(nb_conv, nb_conv, 4),
                              padding='valid',
                              strides=(2, 2, 1),
                              dilation_rate=1,
                              input_shape=(Width, Height, 4, 1)))
            model.add(layers.BatchNormalization())
            model.add(layers.Activation('relu'))
            model.add(
                layers.Conv3D(128,
                              kernel_size=(nb_conv, nb_conv, 1),
                              padding='valid',
                              strides=(2, 2, 1),
                              dilation_rate=1))
            model.add(layers.BatchNormalization())
            model.add(layers.Activation('relu'))

            model.add(layers.Flatten())
            # Dense layers
            # model.add(layers.Dense(390))
            # model.add(layers.BatchNormalization())
            # model.add(layers.Activation('relu'))

            model.add(layers.Dense(300))
            model.add(layers.BatchNormalization())
            model.add(layers.Activation('relu'))
            model.add(layers.Dropout(1 - 0.7))

            model.add(layers.Dense(40))
            model.add(layers.BatchNormalization())
            model.add(layers.Activation('relu'))
            model.add(layers.Dropout(1 - 0.7))

            model.add(layers.Dense(1))

            initial_learning_rate = 0.000711709607385886
            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate,
                decay_steps=100000,
                decay_rate=0.96,
                staircase=True)
            model.compile(
                optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
                loss='mse',
                metrics=['mse'])

            #opt = tf.keras.optimizers.Adam(lr=0.0001)

            #model.compile(loss='mse', optimizer = opt)
            return model
Example #19
    def bottleneck(inputs, filters, kernel_size, norm="instance"):
        x = layers.Conv3D(
            filters,
            kernel_size=kernel_size,
            strides=2,
            kernel_initializer="he_normal",
            padding="same",
        )(inputs)
        if norm == "instance":
            x = InstanceNormalization()(x)
        if norm == "batch":
            x = layers.BatchNormalization()(x)
        x = layers.LeakyReLU()(x)
        for i in range(4):
            y = layers.Conv3D(
                filters,
                kernel_size=kernel_size,
                strides=1,
                kernel_initializer="he_normal",
                padding="same",
            )(x)
            if norm == "instance":
                x = InstanceNormalization()(y)
            if norm == "batch":
                x = layers.BatchNormalization()(y)
            x = layers.LeakyReLU()(x)
            x = layers.Concatenate()([x, y])

        return x
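A usage sketch, treating bottleneck as a free function; InstanceNormalization is assumed to come from tensorflow_addons.layers, and the input shape is illustrative.

from tensorflow.keras import layers, Input
from tensorflow_addons.layers import InstanceNormalization  # assumed source of InstanceNormalization

x_in = Input(shape=(32, 32, 32, 16))
x_out = bottleneck(x_in, filters=32, kernel_size=3, norm="instance")  # halves spatial dims, grows channels via concatenation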
Example #20
def get_model(width=32, height=32, depth=20):
    """Build a 3D convolutional neural network model."""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=32, kernel_size=3, activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=32, kernel_size=4, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=(1, 1, 2))(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=32, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=(2, 2, 1))(x)
    x = layers.BatchNormalization()(x)
    x = layers.GlobalMaxPool3D()(x)
    x = layers.Dense(units=64, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(units=128, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    outputs = layers.Dense(units=1, activation='sigmoid')(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
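A usage sketch compiling the model for binary classification; optimizer and loss are illustrative choices.

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = get_model(width=32, height=32, depth=20)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])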
Example #21
def generate_cnn():
    """
    Function to generate a Convolutional Neural Network
    to estimate entropy from (56,56,56) voxel occupancy grids.

    :return: Keras.Model
    """
    inputs = keras.Input(shape=(56, 56, 56))
    base = layers.Reshape(target_shape=(56, 56, 56, 1))(inputs)

    # cnn_a_filters = hp.Int('cnn1_filters', min_value=4, max_value=16, step=4)
    a = layers.Conv3D(8, (5, 5, 5), activation='relu', padding='same')(base)
    a = layers.AveragePooling3D(pool_size=(2, 2, 2))(a)
    a = layers.BatchNormalization()(a)
    a = layers.Dropout(0.25)(a)
    a = layers.Flatten()(a)

    # cnn_b_filters = hp.Int('cnn2_filters', min_value=4, max_value=16, step=4)
    b = layers.Conv3D(8, (3, 3, 3), activation='relu', padding='same')(base)
    b = layers.AveragePooling3D(pool_size=(2, 2, 2))(b)
    b = layers.BatchNormalization()(b)
    b = layers.Dropout(0.25)(b)
    b = layers.Flatten()(b)

    x = layers.Concatenate(axis=1)([a, b])
    # dense_units = hp.Int('dense_units', min_value=256, max_value=512, step=64)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(60, activation='linear')(x)

    model = keras.Model(inputs=inputs, outputs=outputs, name='entronet')
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=5e-5), loss='mae', metrics=['mse'])
    model.summary()
    return model
Example #22
def get_model(x):
    x = layers.Conv3D(64, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 1), strides=(2, 2, 1), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(128, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(128, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    x = layers.Conv3D(256, (3, 3, 3),
                      strides=(1, 1, 1),
                      padding='same',
                      activation='relu')(x)
    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2))(x)
    #    x = layers.SpatialDropout3D(0.35)(x)
    #    x = layers.Conv3D(256, (3, 3, 3), strides=(1, 1, 1), padding='same',
    #                      activation='relu')(x)
    #  x = layers.SpatialDropout3D(0.5)(x)
    #    x = layers.MaxPool3D((2, 2, 2), strides=(2, 2, 2), padding='same')(x)

    x = layers.Flatten()(x)
    return x
Example #23
    def __init__(self, out_shape, strides=1, ksize=3, shortcut=False):
        super(ResBlock_discriminator, self).__init__()
        self.shortcut = shortcut
        self.conv_0 = tfa.layers.SpectralNormalization(
            layers.Conv3D(out_shape,
                          kernel_size=ksize,
                          strides=2,
                          padding='same',
                          name='rd_conv1',
                          use_bias=False))
        self.PRelu0 = layers.LeakyReLU(name='D_LeakyReLU0')
        #self.bn_1 = layers.BatchNormalization()
        # self.conv_1 = tfa.layers.SpectralNormalization(layers.Conv3D(out_shape,kernel_size=ksize,strides=1,padding='same', name = 'rd_conv1', use_bias=False))
        # self.PRelu1 = layers.LeakyReLU(name='D_LeakyReLU1')
        # self.conv_2 = tfa.layers.SpectralNormalization(layers.Conv3D(out_shape,kernel_size=ksize,strides=1,padding='same', name = 'rd_conv1', use_bias=False))
        #self.bn_1 = layers.BatchNormalization()
        #self.bn_1 = layers.BatchNormalization()

        #self.bn_2= layers.BatchNormalization()
        #   self.average_pool1 = layers.AveragePooling3D()

        #shortcut
        if shortcut:
            self.conv_shortcut = tfa.layers.SpectralNormalization(
                layers.Conv3D(out_shape,
                              kernel_size=1,
                              strides=2,
                              padding='valid',
                              use_bias=False))
Example #24
def create_model(height=240, width=320):
    # shape of input: 1 block has 10 frames x height x width x 3 channels (RGB)
    input = tf.keras.Input((10, height, width, 3))

    # 1st Conv3D block includes Conv3D with 8 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=8, kernel_size=(3, 3, 3),
                      activation='relu')(input)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 2nd Conv3D block includes Conv3D with 16 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 3rd Conv3D block includes Conv3D with 32 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3),
                      activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(1, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # Fully-connected block includes GlobalAveragePooling3D, Fully-Connected layer with 512 units and DropOut for Regularization
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation='relu')(x)
    x = layers.Dropout(0.7)(x)

    # output shape (1,) produces value between [0, 1]
    output = layers.Dense(units=1, activation='sigmoid')(x)

    model = tf.keras.Model(input, output, name='3DCNN')
    return model
Example #25
def conv_block_3d(x, growth_rate, name, GN=True):
    """A building block for a dense block.

    Arguments:
    x: input tensor.
    growth_rate: float, growth rate at dense layers.
    name: string, block label.

    Returns:
    Output tensor for the block.
    """
    if GN:
        x1 = GroupNormalization(groups=2, axis=-1, name=name + '_0_gn')(x)
    else:
        x1 = layers.BatchNormalization(name=name + '_0_bn')(x)
    # x1 = layers.BatchNormalization(axis=-1, epsilon=1.001e-5, name=name + '_0_bn')(x)
    x1 = layers.Activation('selu', name=name + '_0_selu')(x1)
    x1 = layers.Conv3D(2 * growth_rate,
                       1,
                       use_bias=False,
                       name=name + '_1_conv',
                       padding='same')(x1)
    # x1 = layers.BatchNormalization(axis=-1, epsilon=1.001e-5, name=name + '_1_bn')(x1)
    if GN:
        x1 = GroupNormalization(groups=2, axis=-1, name=name + '_1_gn')(x1)
    else:
        x1 = layers.BatchNormalization(name=name + '_1_bn')(x1)
    x1 = layers.Activation('selu', name=name + '_1_selu')(x1)
    x1 = layers.Conv3D(growth_rate,
                       3,
                       padding='same',
                       use_bias=False,
                       name=name + '_2_conv')(x1)
    x = layers.Concatenate(axis=-1, name=name + '_concat')([x, x1])
    return x
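A usage sketch stacking two dense-block units; GroupNormalization is assumed to come from tensorflow_addons.layers, and the shapes are illustrative.

from tensorflow.keras import layers, Input
from tensorflow_addons.layers import GroupNormalization  # assumed source of GroupNormalization

x = Input(shape=(16, 16, 16, 8))
x = conv_block_3d(x, growth_rate=4, name='dense1_unit1')  # channels: 8 -> 12
x = conv_block_3d(x, growth_rate=4, name='dense1_unit2')  # channels: 12 -> 16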
Example #26
 def __init__(self, ef_dim):
     super(encoder, self).__init__()
     self.ef_dim = ef_dim
     initializer = tf.keras.initializers.GlorotUniform()
     self.conv_1 = layers.Conv3D(self.ef_dim,
                                 4,
                                 strides=2,
                                 padding="same",
                                 use_bias=True,
                                 kernel_initializer=initializer)
     self.conv_2 = layers.Conv3D(self.ef_dim * 2,
                                 4,
                                 strides=2,
                                 padding="same",
                                 use_bias=True,
                                 kernel_initializer=initializer)
     self.conv_3 = layers.Conv3D(self.ef_dim * 4,
                                 4,
                                 strides=2,
                                 padding="same",
                                 use_bias=True,
                                 kernel_initializer=initializer)
     self.conv_4 = layers.Conv3D(self.ef_dim * 8,
                                 4,
                                 strides=2,
                                 padding="same",
                                 use_bias=True,
                                 kernel_initializer=initializer)
     self.conv_5 = layers.Conv3D(self.ef_dim * 8,
                                 4,
                                 strides=1,
                                 padding="valid",
                                 use_bias=True,
                                 kernel_initializer=initializer)
Example #27
def TemporalTransitionLayer(inputs, TTL_config, i):
  x1 = layers.BatchNormalization()(inputs)
  x1 = layers.Activation('relu')(x1)
  x1 = layers.Conv3D(filters=128,
                    kernel_size=(1,1,1),
                    activation=None,
                    padding='same',
                    use_bias=False)(x1)

  x2 = layers.BatchNormalization()(inputs)
  x2 = layers.Activation('relu')(x2)
  x2 = layers.Conv3D(filters=128,
                    kernel_size=(3,3,3),
                    activation=None,
                    padding='same', 
                    use_bias=False)(x2)

  x3 = layers.BatchNormalization()(inputs)
  x3 = layers.Activation('relu')(x3)
  x3 = layers.Conv3D(filters=128,
                    kernel_size=(TTL_config[i],3,3),
                    activation=None,
                    padding='same',
                    use_bias=False)(x3)

  x = tf.concat([x1, x2, x3], 4)
  x = layers.AveragePooling3D(pool_size=(2,2,2),
                              strides=(2,2,2))(x)
  return(x)
Example #28
    def __init__(self,
                 filters,
                 kernel_size,
                 name,
                 use_bn=True,
                 use_res=True,
                 padding='SAME',
                 use_bias=True):
        super().__init__(name=name)
        self.use_bn = use_bn
        self.use_res = use_res
        # stddev = np.sqrt(2/(kernel_size**2*filters))
        self.conv1 = layers.Conv3D(
            filters,
            kernel_size,
            padding=padding,
            use_bias=use_bias,
            # kernel_initializer=initializers.TruncatedNormal(stddev=stddev),
            name='conv1')
        self.conv2 = layers.Conv3D(
            filters,
            kernel_size,
            padding=padding,
            use_bias=use_bias,
            # kernel_initializer=initializers.TruncatedNormal(stddev=stddev),
            name='conv2')

        if use_bn:
            self.bn1 = layers.BatchNormalization(momentum=0.99, name='bn1')
            self.bn2 = layers.BatchNormalization(momentum=0.99, name='bn2')
        if use_res:
            self.res = _Residual('res')
Example #29
def get_model(width=128, height=128, depth=64):
    """build a 3D convolutional neural network model"""

    inputs = keras.Input((width, height, depth, 1))

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(inputs)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=64, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=128, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)

    outputs = layers.Dense(units=1, activation="sigmoid")(x)

    # Define the model.
    model = keras.Model(inputs, outputs, name="3dcnn")
    return model
Example #30
def discriminator():

    skernel = (4, 4, 4)
    sstride = (2, 2, 2)
    model = tf.keras.Sequential()
    #3D convs
    model.add(
        layers.Conv3D(32,
                      skernel,
                      strides=sstride,
                      padding='same',
                      input_shape=[DIMENSN, DIMENSN, DIMENSN, 1]))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv3D(64, skernel, strides=sstride, padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv3D(128, skernel, strides=sstride, padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    model.add(layers.Conv3D(256, skernel, strides=sstride, padding='same'))
    model.add(layers.LeakyReLU())
    model.add(layers.Dropout(0.3))
    #Final layer
    model.add(layers.Flatten())
    model.add(layers.Dense(1))
    return model
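DIMENSN is not defined in the snippet; a minimal sketch, assuming a 64x64x64 single-channel input, follows.

import tensorflow as tf
from tensorflow.keras import layers

DIMENSN = 64  # assumed cubic input size

disc = discriminator()
disc.summary()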