def build_model(columns, rows, depth, output_units, kernel_size):
    # Based on: https://keras.io/examples/vision/3D_image_classification/
    """Single-conv-block 3D CNN with a sigmoid head of `output_units`.

    Pool and kernel sizes are shrunk to 1 when the spatial dimensions
    are too small (or odd) for the defaults to fit.
    """
    # Tiny spatial dims cannot support a 2x pooling window.
    too_small = columns <= 2 or rows <= 2
    pool_size = 1 if too_small else 2
    # Odd (or tiny) spatial dims force a 1-voxel kernel.
    if too_small or columns % 2 != 0 or rows % 2 != 0:
        kernel_size = 1

    inputs = keras.Input((depth, columns, rows, 1))
    net = layers.Conv3D(filters=64, kernel_size=kernel_size,
                        activation="relu")(inputs)
    net = layers.MaxPool3D(pool_size=pool_size)(net)
    net = layers.BatchNormalization()(net)

    # Head: pooled features -> dense -> dropout -> sigmoid outputs.
    net = layers.GlobalAveragePooling3D()(net)
    net = layers.Dense(units=512, activation="relu")(net)
    net = layers.Dropout(0.3)(net)

    outputs = layers.Dense(units=output_units, activation="sigmoid")(net)

    return keras.Model(inputs, outputs, name="3DCNN")
示例#2
0
def build_network(input_shapes, output_size, training, name = 'TreatmentRecommder'):
    '''
    Build the treatment-recommendation network for COVID-19: predicts how
    long a patient may take to be cured.

    Three input branches are fused:
      * treatment_info  - dense treatment features (shape input_shapes[0])
      * image_info      - a 3D CT volume (shape input_shapes[1] + [1]),
                          encoded by a 5-stage 3D CNN
      * patient_info    - dense patient features (shape input_shapes[2])

    Args:
        input_shapes: list of three shape lists, one per input branch.
        output_size: two-element sequence; output_size[0] classes for the
            severity head, output_size[1] units for the risk head.
        training: bool/tensor forwarded to Dropout and SA_Module so their
            train/inference behavior is fixed at graph-build time.
        name: model name (note: 'TreatmentRecommder' spelling is kept as-is
            because callers may look the model up by this exact name).

    Returns:
        A KM.Model mapping [treatment_info, image_info, patient_info] to
        [severity_cls_preds, risk_reg_preds].
    '''
    dtype = tf.float32
    # Treatment information branch.
    treatment_info = KL.Input(shape = input_shapes[0], dtype = dtype, name='treatment_info') 
   
    # Imaging information branch: CNN features from CT images.
    image_info = KL.Input(shape = input_shapes[1]+[1], dtype = dtype, name='image_info')   
    base_filters = 16    
    # Stage 1: three 3x3x3 convs at base width, then 2x downsampling.
    x11 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x11')(image_info)  
    x12 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x12')(x11)  
    x13 = KL.Conv3D(base_filters, (3, 3, 3), activation='relu', padding='same', name = 'x13')(x12) 
    
    d1 = KL.MaxPool3D()(x13)
    
    # Stages 2-5: two convs each, doubling the filter count per stage.
    x21 = KL.Conv3D(base_filters*2, (3, 3, 3), activation='relu', padding='same', name = 'x21')(d1)  
    x22 = KL.Conv3D(base_filters*2, (3, 3, 3), activation='relu', padding='same', name = 'x22')(x21)  
    
    d2 = KL.MaxPool3D()(x22)
    
    x31 = KL.Conv3D(base_filters*4, (3, 3, 3), activation='relu', padding='same', name = 'x31')(d2)  
    x32 = KL.Conv3D(base_filters*4, (3, 3, 3), activation='relu', padding='same', name = 'x32')(x31)  
   
    d3 = KL.MaxPool3D()(x32)
    
    x41 = KL.Conv3D(base_filters*8, (3, 3, 3), activation='relu', padding='same', name = 'x41')(d3)  
    x42 = KL.Conv3D(base_filters*8, (3, 3, 3), activation='relu', padding='same', name = 'x42')(x41)  
  
    d4 = KL.MaxPool3D()(x42)
    
    x51 = KL.Conv3D(base_filters*16, (3, 3, 3), activation='relu', padding='same', name = 'x51')(d4)  
    x52 = KL.Conv3D(base_filters*16, (3, 3, 3), activation='relu', padding='same', name = 'x52')(x51)  
 
    d5 = KL.MaxPool3D()(x52)
    # Pool CNN features; also squeeze them to a single coefficient that is
    # concatenated with the tabular patient data below.
    cnn_GAP = KL.GlobalAveragePooling3D(name='CNN_GAP')(d5)
    cnn_cof = KL.Dense(1, activation='relu', name='cnn_cof')(cnn_GAP)
    
    # Patient information branch.
    patient_info = KL.Input(shape = input_shapes[2], dtype = dtype, name='patient_info')
    pcnn_info = KL.Concatenate()([patient_info, cnn_cof])    
    
    # Cured-probability distribution subnetwork.
    # SA_Module is a project helper -- presumably a self-attention
    # re-weighting of the patient+CNN features; verify its contract.
    w_pcnn_info = SA_Module(pcnn_info, training)
    
    # Shared trunk over all three fused feature sets.
    fc1 = KL.Dense(256, activation='relu', name='fc1')(KL.Concatenate()([w_pcnn_info, cnn_GAP, treatment_info])) 
    fc2 = KL.Dense(512, activation='relu', name='fc2')(fc1) 
    fc3 = KL.Dense(512, activation='relu', name='fc3')(fc2) 
   
    # Head 1: severity classification (softmax over output_size[0] classes).
    fc_cls = KL.Dense(256, activation='relu', name='fc_cls')(fc3) 
    fc_cls = KL.Dropout(0.4)(fc_cls, training = training)
    severity_cls_preds = KL.Dense(output_size[0],activation='softmax', name='severity_cls_preds')(fc_cls)
    
    # Head 2: risk prediction. NOTE(review): despite the '_reg_' name this
    # head uses softmax, i.e. a categorical output -- confirm intent.
    fc_reg = KL.Dense(256, activation='relu', name='fc_reg')(fc3)
    fc_reg = KL.Dropout(0.4)(fc_reg, training = training)
    risk_reg_preds = KL.Dense(output_size[1],activation='softmax', name='risk_reg_preds')(fc_reg)
    
    model = KM.Model([treatment_info,image_info,patient_info], [severity_cls_preds, risk_reg_preds], name=name)
    return model
示例#3
0
def create_model(height=240, width=320):
    """Build a 3-block 3D CNN over 10-frame RGB clips.

    Args:
        height: frame height in pixels.
        width: frame width in pixels.

    Returns:
        A `tf.keras.Model` named '3DCNN' mapping a (10, height, width, 3)
        clip to a single sigmoid probability in [0, 1].
    """
    # shape of input: 1 block has 10 frames x height x width x 3 channels (RGB)
    input = tf.keras.Input((10, height, width, 3))

    # 1st Conv3D block: Conv3D with 8 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=8, kernel_size=(3, 3, 3),
                      activation='relu')(input)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 2nd Conv3D block: Conv3D with 16 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(2, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # 3rd Conv3D block. Fix: previously fed `input` again, which silently
    # dropped the first two blocks from the graph; chain on `x` instead.
    # Temporal pool size is 1 because only a few frames remain by now.
    x = layers.Conv3D(filters=32, kernel_size=(3, 3, 3),
                      activation='relu')(x)
    x = layers.MaxPool3D(pool_size=(1, 2, 2))(x)
    x = layers.BatchNormalization()(x)

    # Head: GlobalAveragePooling3D, 512-unit Dense, heavy Dropout.
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation='relu')(x)
    # Fix: the Keras layer is `Dropout`; `DropOut` raises AttributeError.
    x = layers.Dropout(0.7)(x)

    # output shape (1,) produces value between [0, 1]
    output = layers.Dense(units=1, activation='sigmoid')(x)

    model = tf.keras.Model(input, output, name='3DCNN')
    return model
示例#4
0
    def encoder():
        """Wrap the HighResNet backbone with a BN-regularized projection head."""
        backbone = nobrainer.models.highresnet(
            n_classes=n_classes, input_shape=input_shape
        )

        enc_input = tf.keras.layers.Input(shape=input_shape)
        features = backbone(enc_input)

        # Pool backbone features to a vector.
        h = layers.GlobalAveragePooling3D(name="backbone_pool")(features)

        # Two L2-regularized projections; LeakyReLU only between them,
        # so the model output is the second BatchNormalization.
        for is_last in (False, True):
            h = layers.Dense(
                projection_dim,
                use_bias=False,
                kernel_regularizer=regularizers.l2(weight_decay),
            )(h)
            h = layers.BatchNormalization()(h)
            if not is_last:
                h = layers.LeakyReLU()(h)

        return tf.keras.Model(enc_input, h, name="encoder")
示例#5
0
def resNet(sideLength):
    '''3D ResNet: strided conv stem, three residual stages, sigmoid head.'''
    vol_in = keras.Input(shape=(sideLength, sideLength, sideLength, 1))

    # Stem: strided 7-voxel conv, BN, ReLU, then stride-1 max pooling.
    net = layers.Conv3D(filters=64, kernel_size=7, strides=2, padding='same')(vol_in)
    net = layers.BatchNormalization()(net)
    net = layers.Activation('relu')(net)
    net = layers.MaxPooling3D(2, strides=1)(net)

    # Residual stages: (inner width, outer width, extra residual blocks).
    for narrow, wide, repeats in ((64, 256, 2), (128, 512, 3), (256, 1024, 5)):
        net = first_block(net, narrow, wide)
        for _ in range(repeats):
            net = residual_block(net, narrow, wide)

    # Classifier head on pooled features.
    net = layers.GlobalAveragePooling3D()(net)
    net = layers.BatchNormalization()(net)
    net = layers.Activation('relu')(net)
    net = layers.Flatten()(net)
    net = layers.Dense(512, activation='relu')(net)
    net = layers.Dropout(0.2)(net)
    prob = layers.Dense(1, activation='sigmoid')(net)

    return keras.Model(vol_in, prob, name='resnet')
def get_model(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model."""
    inputs = keras.Input((width, height, depth, 1))

    # Four identical conv blocks (Conv3D -> MaxPool3D -> BatchNorm)
    # with the filter count growing along the way.
    x = inputs
    for n_filters in (64, 64, 128, 256):
        x = layers.Conv3D(filters=n_filters, kernel_size=3, activation="relu")(x)
        x = layers.MaxPool3D(pool_size=2)(x)
        x = layers.BatchNormalization()(x)

    # Head: pooled features -> dense -> dropout -> sigmoid.
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    outputs = layers.Dense(units=1, activation="sigmoid")(x)

    # Define the model.
    return keras.Model(inputs, outputs, name="3dcnn")
示例#7
0
 def __init__(self, dim):
     """Select the GlobalAveragePooling layer matching `dim`.

     Args:
         dim: dimensionality of the pooled data; 1, 2 or 3.

     NOTE(review): for any other `dim` value `self.pool` is left unset,
     so a later use would raise AttributeError -- confirm callers only
     ever pass 1, 2, or 3.
     """
     super(_AP, self).__init__()
     if dim == 1:
         self.pool = layers.GlobalAveragePooling1D()
     elif dim == 2:
         self.pool = layers.GlobalAveragePooling2D()
     elif dim == 3:
         self.pool = layers.GlobalAveragePooling3D()
def build_model(columns, rows, depth, output_units):
    # Based on: https://keras.io/examples/vision/3D_image_classification/
    """One-conv-block 3D CNN; kernel size comes from project settings."""
    volume = keras.Input((depth, columns, rows, 1))

    # Single convolutional block.
    features = layers.Conv3D(filters=64,
                             kernel_size=settings.KERNEL_SIZE,
                             activation="relu")(volume)
    features = layers.MaxPool3D(pool_size=2)(features)
    features = layers.BatchNormalization()(features)

    # Head: pool, dense, dropout, sigmoid outputs.
    features = layers.GlobalAveragePooling3D()(features)
    features = layers.Dense(units=512, activation="relu")(features)
    features = layers.Dropout(0.3)(features)
    predictions = layers.Dense(units=output_units, activation="sigmoid")(features)

    return keras.Model(volume, predictions, name="3DCNN")
示例#9
0
def get_model(input_shape=(None, None, None, 1)):
    """3D SqueezeNet-style classifier built from `fire_block` modules.

    Args:
        input_shape: (D, H, W, C); None allows variable spatial sizes.

    Returns:
        A `tf.keras.Model` named "SqueezeNet".

    NOTE(review): the final Conv3D uses filters=1, so after global average
    pooling each sample reduces to a single value and Softmax over it is
    constant 1.0 -- this looks like it should be the number of classes;
    confirm against callers.
    """
    inputs = layers.Input(shape=input_shape)
    # Stem: pointwise conv then strided pooling.
    x = layers.Conv3D(filters=96, kernel_size=1, activation="relu")(inputs)
    x = layers.MaxPool3D(strides=2)(x)
    x = layers.BatchNormalization()(x)
    # fire_block is a project helper (presumably squeeze/expand);
    # width grows stage by stage, with pooling between stages.
    x = fire_block(x, 128)
    x = fire_block(x, 128)
    x = fire_block(x, 256)
    x = layers.MaxPool3D(strides=2)(x)
    x = fire_block(x, 256)
    x = fire_block(x, 384)
    x = fire_block(x, 384)
    x = fire_block(x, 512)
    x = layers.MaxPool3D(strides=2)(x)
    x = fire_block(x, 512)
    # Head: 1-channel pointwise conv, BN, GAP, softmax.
    x = layers.Conv3D(filters=1, kernel_size=1, activation="relu")(x)
    x = layers.BatchNormalization()(x)
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Softmax()(x)
    return tf.keras.Model(inputs=inputs, outputs=x, name="SqueezeNet")
def get_model_8L_(width=128, height=128, depth=64):
    """Build a 3D convolutional neural network model."""
    inputs = keras.Input((width, height, depth, 1))

    # Conv stages with per-stage (filters, pool size, dropout rate).
    stages = ((64, 2, 0.2), (64, 2, 0.5), (64, 2, 0.3), (128, 1, 0.3))
    x = inputs
    for n_filters, pool, drop_rate in stages:
        x = layers.Conv3D(filters=n_filters, kernel_size=3, activation="relu")(x)
        x = layers.MaxPool3D(pool_size=pool)(x)
        x = layers.BatchNormalization(center=True, scale=True)(x)
        x = layers.Dropout(drop_rate)(x)

    # Final conv stage: no dropout, default-configured BatchNorm.
    x = layers.Conv3D(filters=256, kernel_size=3, activation="relu")(x)
    x = layers.MaxPool3D(pool_size=2)(x)
    x = layers.BatchNormalization()(x)

    # Head: pooled features -> dense -> dropout -> sigmoid.
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(units=512, activation="relu")(x)
    x = layers.Dropout(0.3)(x)
    outputs = layers.Dense(units=1, activation="sigmoid")(x)

    return keras.Model(inputs, outputs, name="3dcnn")
示例#11
0
def get_model(width=224, height=224, depth=32):
    """Build a 3D convolutional neural network model."""
    clip = keras.Input((width, height, depth, 1))

    # Four same-padded conv blocks; filters double after the second.
    h = clip
    for n_filters in (64, 64, 128, 256):
        h = layers.Conv3D(filters=n_filters,
                          kernel_size=3,
                          activation="relu",
                          padding='same')(h)
        h = layers.MaxPool3D(pool_size=2)(h)
        h = layers.BatchNormalization()(h)

    # 51-way softmax classification head.
    h = layers.GlobalAveragePooling3D()(h)
    h = layers.Dense(units=512, activation="relu")(h)
    h = layers.Dropout(0.3)(h)
    scores = layers.Dense(units=51, activation="softmax")(h)

    return keras.Model(clip, scores, name="3dcnn")
示例#12
0
def RFAB(x, filters, kernel_size, r):
    """Residual Feature attention Block"""
    skip = x

    # Two weight-normalized convs produce the features to be re-scaled.
    feat = conv3d_weightnorm(filters, kernel_size)(x)
    feat = layers.ReLU()(feat)
    feat = conv3d_weightnorm(filters, kernel_size)(feat)

    # Channel attention: squeeze to a vector, expand back to 1x1x1xC,
    # bottleneck by factor r, then a sigmoid gate.
    gate = layers.GlobalAveragePooling3D()(feat)
    for _ in range(3):
        gate = layers.Lambda(lambda t: tf.expand_dims(t, axis=-2))(gate)
    gate = conv3d_weightnorm(int(filters / r), 1)(gate)
    gate = layers.ReLU()(gate)
    gate = conv3d_weightnorm(filters, 1, activation='sigmoid')(gate)

    # Scale the features and add the residual connection.
    return feat * gate + skip
示例#13
0
def get_model(width=128, height=128, depth=8):
    """Build a 3D convolutional neural network model."""
    volume = keras.Input((width, height, depth, 1))

    # Stage 1: three 32-filter same-padded convs, then 2x pooling.
    net = volume
    for _ in range(3):
        net = layers.Conv3D(filters=32,
                            kernel_size=3,
                            activation="relu",
                            padding='same')(net)
    net = layers.MaxPool3D(pool_size=(2, 2, 2), padding="same")(net)

    # Stage 2: three 64-filter convs, then 2x pooling.
    for _ in range(3):
        net = layers.Conv3D(filters=64,
                            kernel_size=3,
                            activation="relu",
                            padding='same')(net)
    net = layers.MaxPool3D(pool_size=(2, 2, 2), padding="same")(net)

    # Stage 3: two 128-filter convs, no pooling.
    for _ in range(2):
        net = layers.Conv3D(filters=128,
                            kernel_size=3,
                            activation="relu",
                            padding='same')(net)

    # Head: GAP, then two Dense -> BatchNorm -> ReLU blocks.
    net = layers.GlobalAveragePooling3D()(net)
    for n_units in (512, 256):
        net = layers.Dense(units=n_units)(net)
        net = layers.BatchNormalization()(net)
        net = layers.ReLU()(net)

    # 8-way softmax output.
    probs = layers.Dense(units=8, activation="softmax")(net)

    # Define the model.
    return keras.Model(volume, probs, name="3dcnn")
示例#14
0
def get_pyramidnet(dataset, depth, alpha, num_classes, bottleneck=False):
    """Build a PyramidNet-style classifier for a CIFAR or ImageNet config.

    Args:
        dataset: 'cifar*' or 'imagenet'; selects stem and stage layout.
        depth: overall network depth, used to derive blocks per stage.
        alpha: total additive widening spread evenly over all units.
        num_classes: units in the final Dense (logits) layer.
        bottleneck: if True use bottleneck residual units, else basic ones.

    Returns:
        A `models.Model` mapping the input image to `num_classes` logits.

    Raises:
        ValueError: if `dataset` is neither a 'cifar*' variant nor 'imagenet'.
    """
    if dataset.startswith('cifar'):
        inplanes = 16
        if bottleneck == True:
            n = int((depth - 2) / 9)
            # NOTE(review): `bottleneck` here is the boolean parameter, which
            # shadows any module-level block builder of the same name, so this
            # assigns True rather than a block function. The else-branch uses
            # the module-level `basicblock`. Confirm the intended builder.
            block = bottleneck
        else:
            n = int((depth - 2) / 6)
            block = basicblock

        # Width increment per residual unit (3 stages of n units each).
        addrate = alpha / (3 * n * 1.0)

        inp = layers.Input(shape=(32, 32, 3))
        x = inp

        # Replicate the image along a new axis of size `num_rot`
        # (a free variable expected from the enclosing module).
        x = layers.Lambda(
            lambda v: tf.tile(v[..., None, :], [1, 1, 1, num_rot, 1]))(x)

        x = conv3x3(x, inplanes, stride=1)
        x = batchnorm(x)

        # Three pyramidal stages; the first keeps resolution, the others halve it.
        featuremap_dim = inplanes
        x, featuremap_dim = pyramidal_make_layer(x, block, n, 1,
                                                 featuremap_dim, addrate)
        x, featuremap_dim = pyramidal_make_layer(x, block, n, 2,
                                                 featuremap_dim, addrate)
        x, featuremap_dim = pyramidal_make_layer(x, block, n, 2,
                                                 featuremap_dim, addrate)

        x = batchnorm(x)
        x = activation(x)
        x = layers.GlobalAveragePooling3D()(x)
        x = layers.Dense(num_classes)(x)

        outp = x

    elif dataset == 'imagenet':
        # Standard ResNet depth -> block type / per-stage layer counts.
        blocks = {
            18: basicblock,
            34: basicblock,
            50: bottleneck,
            101: bottleneck,
            152: bottleneck,
            200: bottleneck
        }
        layers_ = {
            18: [2, 2, 2, 2],
            34: [3, 4, 6, 3],
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
            200: [3, 24, 36, 3]
        }

        # Unknown depths get a uniform four-stage layout derived from `depth`.
        if layers_.get(depth) is None:
            if bottleneck == True:
                # NOTE(review): same parameter-shadowing concern as above.
                blocks[depth] = bottleneck
                temp_cfg = int((depth - 2) / 12)
            else:
                blocks[depth] = basicblock
                temp_cfg = int((depth - 2) / 8)

            layers_[depth] = [temp_cfg, temp_cfg, temp_cfg, temp_cfg]
            print('=> the layer configuration for each stage is set to',
                  layers_[depth])

        inplanes = 64
        addrate = alpha / (sum(layers_[depth]) * 1.0)

        # NOTE(review): a 32x32 input for ImageNet looks copy-pasted from the
        # CIFAR branch -- confirm the intended input resolution.
        inp = layers.Input(shape=(32, 32, 3))
        x = inp

        x = conv3x3(x, inplanes, stride=2, kernel_size=7)

        x = batchnorm(x)
        x = activation(x)
        x = layers.MaxPool3D(pool_size=(3, 3, 1),
                             strides=(2, 2, 1),
                             padding='same')(x)

        # Four pyramidal stages; all but the first downsample by 2.
        featuremap_dim = inplanes
        x, featuremap_dim = pyramidal_make_layer(x, blocks[depth],
                                                 layers_[depth][0], 1,
                                                 featuremap_dim, addrate)
        x, featuremap_dim = pyramidal_make_layer(x, blocks[depth],
                                                 layers_[depth][1], 2,
                                                 featuremap_dim, addrate)
        x, featuremap_dim = pyramidal_make_layer(x, blocks[depth],
                                                 layers_[depth][2], 2,
                                                 featuremap_dim, addrate)
        x, featuremap_dim = pyramidal_make_layer(x, blocks[depth],
                                                 layers_[depth][3], 2,
                                                 featuremap_dim, addrate)

        x = batchnorm(x)
        x = activation(x)
        # Fix: was `GlobalAvergePooling3D` (typo), which raises
        # AttributeError; the CIFAR branch above spells it correctly.
        x = layers.GlobalAveragePooling3D()(x)
        x = layers.Dense(num_classes)(x)

        outp = x

    else:
        # Fail fast instead of hitting UnboundLocalError on `inp`/`outp`.
        raise ValueError('unknown dataset: {!r}'.format(dataset))

    return models.Model(inp, outp)
示例#15
0
    def __init__(self, ch= 32):
        """Define the discriminator's layers.

        Three parallel spectrally-normalized Dense(512) stacks (x/y/z)
        embed three conditioning inputs; a chain of residual blocks
        processes the volumetric input; the pieces are combined via
        Multiply / reduce_sum / add into a single linear score.

        Args:
            ch: base channel count; residual blocks use multiples of it.
        """
        super(discriminator, self).__init__()
        self.ch = ch
        # self.inputBN = layers.BatchNormalization()
        # Embedding stack for the first conditioning parameter.
        self.xD0 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter1_layer_1'))
        # if self.hparams[HP_BN_UNITS] : x = layers.BatchNormalization()(x)
        # self.xD0BN = layers.BatchNormalization()
        self.xR0 = layers.LeakyReLU()
        self.xD1 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter1_layer_2'))
        # if self.hparams[HP_BN_UNITS] : x = layers.BatchNormalization()(x)
        # self.xD1BN = layers.BatchNormalization()
        self.xR1 = layers.LeakyReLU()
        self.xD2 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter1_layer_3'))
        # if self.hparams[HP_BN_UNITS] : x = layers.BatchNormalization()(x)
        # self.xD2BN = layers.BatchNormalization()
        self.xR2 = layers.LeakyReLU()
        
        # Embedding stack for the second conditioning parameter.
        self.yD0 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter2_layer_1'))
        # if self.hparams[HP_BN_UNITS] : y = layers.BatchNormalization()(y)
        # self.yD0BN = layers.BatchNormalization()
        self.yR0 = layers.LeakyReLU()
        self.yD1 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter2_layer_2'))
        # if self.hparams[HP_BN_UNITS] : y = layers.BatchNormalization()(y)
        # self.yD1BN = layers.BatchNormalization()
        self.yR1 = layers.LeakyReLU()
        self.yD2 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter2_layer_3'))
        # if self.hparams[HP_BN_UNITS] : y = layers.BatchNormalization()(y)
        # self.yD2BN = layers.BatchNormalization()
        self.yR2 = layers.LeakyReLU()

        
        # Embedding stack for the third conditioning parameter.
        self.zD0 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter3_layer_1'))
        # if self.hparams[HP_BN_UNITS] : z = layers.BatchNormalization()(z)
        # self.zD0BN = layers.BatchNormalization()
        self.zR0 = layers.LeakyReLU()
        self.zD1 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter3_layer_2'))
        # if self.hparams[HP_BN_UNITS] : z = layers.BatchNormalization()(z)
        # self.zD1BN = layers.BatchNormalization()
        self.zR1 = layers.LeakyReLU()
        self.zD2 = tfa.layers.SpectralNormalization(layers.Dense(512, name = 'parameter3_layer_3'))
        # if self.hparams[HP_BN_UNITS] : z = layers.BatchNormalization()(z)
        # self.zD2BN = layers.BatchNormalization()
        self.zR2 = layers.LeakyReLU()

        # self.concateD = tfa.layers.SpectralNormalization(layers.Dense(ch*32))
        # self.concateR = layers.LeakyReLU()

        # self.conv0 = tfa.layers.SpectralNormalization(layers.Conv3D(ch, kernel_size=3, strides=2, padding='same', use_bias=False))
        #self.resR0 = layers.LeakyReLU()
        # self.conv1 = tfa.layers.SpectralNormalization(layers.Conv3D(2*ch, kernel_size=3, strides=2, padding='same', use_bias=False))
        # self.resR1 = layers.LeakyReLU()
        # self.conv2 = tfa.layers.SpectralNormalization(layers.Conv3D(4*ch, kernel_size=3, strides=2, padding='same', use_bias=False))
        # self.resR2 = layers.LeakyReLU()
        # self.conv3 = tfa.layers.SpectralNormalization(layers.Conv3D(8*ch, kernel_size=3, strides=2, padding='same', use_bias=False))
        # self.resR3 = layers.LeakyReLU()
        # Residual trunk; widths scale with the base channel count.
        self.res0 = ResBlock_discriminator(ch*2, ksize=4, shortcut=True)
        self.res1 = ResBlock_discriminator(ch*4, ksize=4 , shortcut=True)
        self.res2 = ResBlock_discriminator(ch*8, ksize=4, shortcut=True)
        self.res3 = ResBlock_discriminator(ch*16, ksize=4, shortcut=True)
        # self.res4 = ResBlock_discriminator(ch*8, ksize=3)
        # self.res5 = ResBlock_discriminator(ch*16, ksize=3)
        # self.res6 = ResBlock_discriminator(ch*16, ksize=3)
        # self.conv4 = tfa.layers.SpectralNormalization(layers.Conv3D(1, kernel_size=4, strides=1, padding='valid', use_bias=False))
        self.GAV3D = layers.GlobalAveragePooling3D()
        #self.out = tfa.layers.SpectralNormalization(layers.Dense(1, kernel_initializer=initializers.he_normal()))
        # self.conv = tfa.layers.SpectralNormalization(layers.Dense(ch*32))
        # Combination ops -- presumably used in call(); verify there.
        self.multiple = layers.Multiply()
        self.sum = tf.math.reduce_sum
        
        self.out = layers.add
        self.outputD = tfa.layers.SpectralNormalization(layers.Dense(1))
        self.outputActivation = layers.Activation('linear')
示例#16
0
文件: model.py 项目: zwcdp/autokeras
def to_real_keras_layer(stub_layer):
    """Translate an abstract stub layer into the equivalent Keras layer.

    Args:
        stub_layer: one of the project's Stub* layer descriptors.

    Returns:
        A freshly-constructed `layers.*` instance mirroring the stub's
        configuration.

    NOTE(review): an unrecognized stub type falls through every branch and
    the function implicitly returns None -- confirm callers handle that.
    """
    if isinstance(stub_layer, StubConv1d):
        return layers.Conv1D(stub_layer.filters,
                             stub_layer.kernel_size,
                             strides=stub_layer.stride,
                             input_shape=stub_layer.input.shape,
                             padding='same')  # padding

    elif isinstance(stub_layer, StubConv2d):
        return layers.Conv2D(stub_layer.filters,
                             stub_layer.kernel_size,
                             strides=stub_layer.stride,
                             input_shape=stub_layer.input.shape,
                             padding='same')  # padding

    elif isinstance(stub_layer, StubConv3d):
        return layers.Conv3D(stub_layer.filters,
                             stub_layer.kernel_size,
                             strides=stub_layer.stride,
                             input_shape=stub_layer.input.shape,
                             padding='same')  # padding

    # TODO: Spatial Dropout
    elif isinstance(stub_layer, (StubDropout1d, StubDropout2d, StubDropout3d)):
        return layers.Dropout(stub_layer.rate)
    # elif isinstance(stub_layer, StubDropout2d):
    #     return layers.SpatialDropout2D(stub_layer.rate)
    # elif isinstance(stub_layer, StubDropout3d):
    #     return layers.SpatialDropout3D(stub_layer.rate)

    elif isinstance(stub_layer, StubAvgPooling1d):
        return layers.AveragePooling1D(stub_layer.kernel_size,
                                       strides=stub_layer.stride)
    elif isinstance(stub_layer, StubAvgPooling2d):
        return layers.AveragePooling2D(stub_layer.kernel_size,
                                       strides=stub_layer.stride)
    elif isinstance(stub_layer, StubAvgPooling3d):
        return layers.AveragePooling3D(stub_layer.kernel_size,
                                       strides=stub_layer.stride)

    elif isinstance(stub_layer, StubGlobalPooling1d):
        return layers.GlobalAveragePooling1D()
    elif isinstance(stub_layer, StubGlobalPooling2d):
        return layers.GlobalAveragePooling2D()
    elif isinstance(stub_layer, StubGlobalPooling3d):
        return layers.GlobalAveragePooling3D()

    # Note: the StubPooling* checks must stay AFTER the StubAvgPooling* /
    # StubGlobalPooling* checks in case those are subclasses of StubPooling*.
    elif isinstance(stub_layer, StubPooling1d):
        return layers.MaxPooling1D(stub_layer.kernel_size,
                                   strides=stub_layer.stride)
    elif isinstance(stub_layer, StubPooling2d):
        return layers.MaxPooling2D(stub_layer.kernel_size,
                                   strides=stub_layer.stride)
    elif isinstance(stub_layer, StubPooling3d):
        return layers.MaxPooling3D(stub_layer.kernel_size,
                                   strides=stub_layer.stride)

    elif isinstance(stub_layer,
                    (StubBatchNormalization1d, StubBatchNormalization2d,
                     StubBatchNormalization3d)):
        return layers.BatchNormalization(input_shape=stub_layer.input.shape)

    elif isinstance(stub_layer, StubSoftmax):
        return layers.Activation('softmax')
    elif isinstance(stub_layer, StubReLU):
        return layers.Activation('relu')
    elif isinstance(stub_layer, StubFlatten):
        return layers.Flatten()
    elif isinstance(stub_layer, StubAdd):
        return layers.Add()
    elif isinstance(stub_layer, StubConcatenate):
        return layers.Concatenate()
    elif isinstance(stub_layer, StubDense):
        return layers.Dense(stub_layer.units,
                            input_shape=(stub_layer.input_units, ))
示例#17
0
文件: models.py 项目: mjneel/biondi
def resnet50_3d(inputs,
                filter_ratio=1,
                n=2,
                include_fc_layer=False,
                logits=True,
                kernal1=(1, 1, 1),
                kernal3=(1, 3, 3),
                kernal7=(1, 7, 7),
                num_layers=None):
    """Build a 3D ResNet-50 backbone with an optional classification head.

    :param inputs: Keras Input object with the desired shape
    :type inputs: KerasTensor
    :param filter_ratio: multiplier applied to every stage's filter count
    :type filter_ratio: float
    :param n: # of categories
    :type n: integer
    :param include_fc_layer: if True the returned model outputs the Dense
        head; otherwise it outputs the final backbone feature map (res5)
    :type include_fc_layer: bool
    :param logits: if True the Dense head emits raw logits, otherwise a
        softmax distribution
    :type logits: bool
    :param kernal1: kernel for the 1x1-style convs (parameter name kept
        for backward compatibility)
    :param kernal3: kernel for the 3x3-style convs
    :param kernal7: kernel for the stem conv
    :param num_layers: if set (1-5), global-average-pool the output of
        that backbone stage instead of res5
    :return: the assembled model
    :rtype: Model
    """
    # --- Define kwargs dictionary
    kwargs1 = {
        'kernel_size': kernal1,
        'padding': 'valid',
    }
    kwargs3 = {
        'kernel_size': kernal3,
        'padding': 'same',
    }
    kwargs7 = {
        'kernel_size': kernal7,
        'padding': 'valid',
    }
    # --- Define block components
    conv1 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs1)(x)
    conv3 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs3)(x)
    relu = lambda x: layers.LeakyReLU()(x)
    conv7 = lambda x, filters, strides: layers.Conv3D(
        filters=filters, strides=strides, **kwargs7)(x)
    max_pool = lambda x, pool_size, strides: layers.MaxPooling3D(
        pool_size=pool_size, strides=strides, padding='valid')(x)
    norm = lambda x: layers.BatchNormalization()(x)
    add = lambda x, y: layers.Add()([x, y])
    zeropad = lambda x, padding: layers.ZeroPadding3D(padding=padding)(x)
    # --- Residual blocks
    # conv blocks (projection shortcut)
    conv_1 = lambda filters, x, strides: relu(
        norm(conv1(x, filters, strides=strides)))
    conv_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    conv_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    conv_sc = lambda filters, x, strides: norm(
        conv1(x, filters, strides=strides))
    conv_block = lambda filters1, filters2, x, strides: relu(
        add(conv_sc(filters2, x, strides),
            conv_3(filters2, conv_2(filters1, conv_1(filters1, x, strides)))))
    # identity blocks (identity shortcut)
    identity_1 = lambda filters, x: relu(norm(conv1(x, filters, strides=1)))
    identity_2 = lambda filters, x: relu(norm(conv3(x, filters, strides=1)))
    identity_3 = lambda filters, x: norm(conv1(x, filters, strides=1))
    identity_block = lambda filters1, filters2, x: relu(
        add(
            identity_3(filters2, identity_2(filters1, identity_1(filters1, x))
                       ), x))
    # --- ResNet-50 backbone
    # stage 1 c2 1/4: padded stem conv + BN + LeakyReLU + pooling
    res1 = max_pool(zeropad(
        relu(
            norm(
                conv7(zeropad(inputs, (0, 3, 3)),
                      int(64 * filter_ratio),
                      strides=(1, 2, 2)))), (0, 1, 1)), (1, 3, 3),
                    strides=(1, 2, 2))
    # stage 2 c2 1/4: 1 conv block + 2 identity blocks
    res2 = layers.Lambda(lambda x: x, name='c2-output')(identity_block(
        int(64 * filter_ratio), int(256 * filter_ratio),
        identity_block(
            int(64 * filter_ratio), int(256 * filter_ratio),
            conv_block(int(64 * filter_ratio),
                       int(256 * filter_ratio),
                       res1,
                       strides=1))))
    # stage 3 c3 1/8: 1 conv block + 3 identity blocks
    res3 = layers.Lambda(lambda x: x, name='c3-output')(identity_block(
        int(128 * filter_ratio), int(512 * filter_ratio),
        identity_block(
            int(128 * filter_ratio), int(512 * filter_ratio),
            identity_block(
                int(128 * filter_ratio), int(512 * filter_ratio),
                conv_block(int(128 * filter_ratio),
                           int(512 * filter_ratio),
                           res2,
                           strides=(1, 2, 2))))))
    # stage 4 c4 1/16: 1 conv block + 5 identity blocks
    res4 = layers.Lambda(lambda x: x, name='c4-output')(identity_block(
        int(256 * filter_ratio), int(1024 * filter_ratio),
        identity_block(
            int(256 * filter_ratio), int(1024 * filter_ratio),
            identity_block(
                int(256 * filter_ratio), int(1024 * filter_ratio),
                identity_block(
                    int(256 * filter_ratio), int(1024 * filter_ratio),
                    identity_block(
                        int(256 * filter_ratio), int(1024 * filter_ratio),
                        conv_block(int(256 * filter_ratio),
                                   int(1024 * filter_ratio),
                                   res3,
                                   strides=(1, 2, 2))))))))
    # stage 5 c5 1/32: 1 conv block + 2 identity blocks
    res5 = layers.Lambda(lambda x: x, name='c5-output')(identity_block(
        int(512 * filter_ratio), int(2048 * filter_ratio),
        identity_block(
            int(512 * filter_ratio), int(2048 * filter_ratio),
            conv_block(int(512 * filter_ratio),
                       int(2048 * filter_ratio),
                       res4,
                       strides=(1, 2, 2)))))
    # Optionally tap an earlier stage (num_layers is 1-based).
    if num_layers:
        avg_pool = layers.GlobalAveragePooling3D()(
            [res1, res2, res3, res4, res5][num_layers - 1])
    else:
        avg_pool = layers.GlobalAveragePooling3D()(res5)
    flatten = layers.Flatten()(avg_pool)
    if logits:
        logits = layers.Dense(n)(flatten)
    else:
        # Fix: the Dense layer was never applied to `flatten`, so the
        # "model output" was a layer object rather than a tensor.
        logits = layers.Dense(n, activation='softmax')(flatten)
    if include_fc_layer:
        model = Model(inputs=inputs, outputs=logits)
    else:
        model = Model(inputs=inputs, outputs=res5)
    return model
示例#18
0
def ResNet(stack_fn,
           preact,
           use_bias,
           model_name='resnet',
           include_top=True,
           input_tensor=None,
           input_shape=None,
           pooling=None,
           classes=1000,
           **kwargs):
    """Instantiates a 3D ResNet / ResNetV2 / ResNeXt-style architecture.

    Adapted from the 2D keras-applications ResNet builder: all conv/pool
    layers are the 3D variants, so the model consumes 5D tensors
    `(batch, dim1, dim2, dim3, channels)` under `channels_last`.
    Note: unlike the 2D original, no pre-trained weights are loaded here;
    any `weights`-style entries in **kwargs are ignored.

    # Arguments
        stack_fn: function mapping the stem output tensor to the output
            tensor of the stacked residual blocks.
        preact: whether to use pre-activation
            (True for ResNetV2, False for ResNet and ResNeXt).
        use_bias: whether convolutional layers use biases
            (True for ResNet and ResNetV2, False for ResNeXt).
        model_name: string, model name.
        include_top: whether to append the global-average-pool + softmax
            classification head.
        input_tensor: optional Keras tensor (i.e. output of
            `layers.Input()`) to use as the model input.
        input_shape: optional shape tuple, used when `input_tensor` is
            None or is not already a Keras tensor.
        pooling: optional pooling mode when `include_top` is False:
            - None: output is the raw 5D feature tensor of the last
              convolutional block.
            - 'avg': global average pooling (2D output).
            - 'max': global max pooling (2D output).
        classes: number of classes, used only when `include_top` is True.

    # Returns
        A Keras model instance.
    """
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # BUGFIX: for 5D (Conv3D) tensors the channel axis under
    # `channels_last` is 4, not 3 — 3 was the 2D-ResNet axis and made
    # every BatchNormalization below normalize a spatial axis instead of
    # the channels.
    bn_axis = 4 if backend.image_data_format() == 'channels_last' else 1

    # Stem: pad + strided 7x7x7 conv.
    x = layers.ZeroPadding3D(padding=3, name='conv1_pad')(img_input)
    x = layers.Conv3D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)

    if not preact:
        # Original ResNet / ResNeXt: BN + ReLU directly after the stem conv.
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)

    x = layers.ZeroPadding3D(padding=1, name='pool1_pad')(x)
    x = layers.MaxPooling3D(3, strides=2, name='pool1_pool')(x)

    x = stack_fn(x)

    if preact:
        # ResNetV2: a single BN + ReLU after all residual stacks.
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,
                                      name='post_bn')(x)
        x = layers.Activation('relu', name='post_relu')(x)

    if include_top:
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    elif pooling == 'avg':
        x = layers.GlobalAveragePooling3D(name='avg_pool')(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling3D(name='max_pool')(x)

    # Ensure that the model takes into account any potential
    # predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name=model_name)

    return model
示例#19
0
def _build_rgb_branch(rgb_input):
    """3D-CNN branch with two residual blocks over raw RGB frames.

    Returns a 2-way softmax tensor built on top of `rgb_input`.
    """
    # block 1: two convs then downsample by 2 in every dimension
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(rgb_input)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block1_output = layers.MaxPool3D(
        pool_size=(2, 2, 2),
        strides=(2, 2, 2),
        padding='same'
    )(x)
    # block 2: residual (same filter count, so shapes match for the add)
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block1_output)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block2_output = layers.add([x, block1_output])
    # block 3: residual
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block2_output)
    x = layers.Conv3D(
        filters=8,
        kernel_size=4,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(x)
    block3_output = layers.add([x, block2_output])

    # head: one more conv, global pool, small dense classifier
    x = layers.Conv3D(
        filters=8,
        kernel_size=3,
        strides=(1, 1, 1),
        padding='same',
        data_format='channels_last',
        activation='relu',
    )(block3_output)
    x = layers.GlobalAveragePooling3D()(x)
    x = layers.Dense(64, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    return layers.Dense(2, activation='softmax')(x)


def _build_flow_branch(flow_input):
    """ConvLSTM branch over optical-flow frames.

    Returns a 2-unit linear (no activation) tensor built on `flow_input`.
    NOTE(review): this branch has no softmax, yet it is averaged with the
    softmax RGB branch downstream — confirm this mixing is intended.
    """
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=True,
        dropout=0.5
    )(flow_input)
    x = layers.BatchNormalization()(x)
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=True,
        dropout=0.5
    )(x)
    x = layers.BatchNormalization()(x)
    # last ConvLSTM collapses the time dimension
    x = layers.ConvLSTM2D(
        filters=8,
        kernel_size=3,
        strides=1,
        padding='same',
        data_format='channels_last',
        return_sequences=False,
        dropout=0.5
    )(x)
    x = layers.BatchNormalization()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dense(128, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    return layers.Dense(2)(x)


def main():
    """Train the two-branch (RGB + optical flow) deepfake classifier.

    Reads training videos and labels from `data/train_sample_videos`,
    trains a fused RGB/flow model, predicts on `data/test_videos`, and
    writes a timestamped submission CSV.
    """
    arg_list = None
    args = parseArgs(arg_list)
    # grab training data
    filepath = 'data/train_sample_videos'
    datapath = os.path.join(filepath, 'metadata.json')
    data = pd.read_json(datapath).T
    if args.sample:
        # small subset for quick smoke runs
        files = [os.path.join(filepath, f) for f in data.index][:20]
        labels = data.label.values[:20]
    else:
        files = [os.path.join(filepath, f) for f in data.index]
        labels = data.label.values
    x_train, x_test, y_train, y_test = train_test_split(
        files, labels, test_size=float(args.test_split))
    # keyword args: `compute_class_weight` parameters are keyword-only in
    # modern scikit-learn; keywords also work on older versions.
    class_names = np.unique(y_train)
    class_weights = compute_class_weight(
        class_weight='balanced', classes=class_names, y=y_train)
    for k, v in zip(class_names, class_weights):
        print(k, v)
    # BUGFIX: Keras `fit(class_weight=...)` requires a dict mapping class
    # *indices* to weights, not the raw array from compute_class_weight.
    # Indices must follow the REAL->0 / FAKE->1 relabeling below (note
    # np.unique sorts alphabetically: FAKE, REAL — so enumerate() would
    # pair the weights with the wrong classes).
    class_weight_dict = {
        (0 if name == 'REAL' else 1): weight
        for name, weight in zip(class_names, class_weights)
    }
    y_train = list(map(lambda x: 0 if x == 'REAL' else 1, y_train))
    y_test = list(map(lambda x: 0 if x == 'REAL' else 1, y_test))
    y_train = to_categorical(y_train, num_classes=2)
    y_test = to_categorical(y_test, num_classes=2)
    print(len(x_train), len(y_train), len(x_test), len(y_test))

    # validation data
    val_path = 'data/test_videos'
    if args.sample:
        val_files = [os.path.join(val_path, f)
                     for f in os.listdir(val_path)][:8]
    else:
        val_files = [os.path.join(val_path, f) for f in os.listdir(val_path)]
    print('number of validation files', len(val_files))

    # generate datasets
    batch_size = args.batch_size
    segment_size = args.segment_size
    rsz = (128, 128)
    train_data = input_fn(
        x_train,
        y_train,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    test_data = input_fn(
        x_test,
        y_test,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    val_data = input_fn(
        files=val_files,
        segment_size=segment_size,
        batch_size=batch_size,
        rsz=rsz)
    rgb_input = tf.keras.Input(
        shape=(segment_size, rsz[0], rsz[1], 3),
        name='rgb_input')
    # optical flow has one fewer frame than RGB (frame-to-frame flow)
    flow_input = tf.keras.Input(
        shape=(segment_size - 1, rsz[0], rsz[1], 2),
        name='flow_input')

    # RGB MODEL
    rgb_outputs = _build_rgb_branch(rgb_input)
    rgb_model = Model(inputs=rgb_input, outputs=rgb_outputs)
    rgb_model.summary()

    # FLOW MODEL
    flow_output = _build_flow_branch(flow_input)
    flow_model = Model(inputs=flow_input, outputs=flow_output)
    flow_model.summary()

    # FINAL MODEL: average the two branch heads, then a small classifier
    final_average = layers.average([rgb_outputs, flow_output])
    x = layers.Flatten()(final_average)
    final_output = layers.Dense(
        2, activation='softmax', name='final_output')(x)
    model = Model(
        inputs={"rgb_input": rgb_input, "flow_input": flow_input},
        outputs=final_output,
        name='my_model'
    )
    model.summary()

    # TRAIN
    dt = datetime.now().strftime('%Y%m%d_%H%M%S')
    opt = tf.keras.optimizers.Adam()
    if args.save_checkpoints:
        save_path = f'data/model_checkpoints/{dt}/ckpt'
        ckpt = tf.keras.callbacks.ModelCheckpoint(
            filepath=save_path,
            save_best_only=False,
            save_weights_only=True
        )
        ckpt = [ckpt]
    else:
        ckpt = []
    model.compile(
        optimizer=opt,
        loss='categorical_crossentropy',
        metrics=['acc'])
    model.fit(
        x=train_data.repeat(),
        validation_data=test_data.repeat(),
        epochs=args.epochs,
        verbose=1,
        class_weight=class_weight_dict,
        # max(1, ...) keeps --sample runs (fewer files than batch_size)
        # from requesting zero steps
        steps_per_epoch=max(1, len(x_train) // batch_size),
        validation_steps=max(1, len(x_test) // batch_size),
        callbacks=ckpt
    )

    # EVAL
    print('\n\n---------------------------------------------------------')
    print('predicting on validation data')
    start = time.time()
    preds = model.predict(
        val_data,
        verbose=1,
        steps=max(1, len(val_files) // batch_size)
    )
    print('prediction time: ', time.time() - start)
    preds = np.argmax(preds, axis=1)
    df = pd.DataFrame(columns=['filename', 'label'])
    df.filename = [v.split('/')[-1] for v in val_files]
    df.label = preds
    df.to_csv(f'data/submission_{dt}.csv', index=False)