Code example #1
File: stl_ae.py  Project: hengyuan-hu/dem
def encode(x, relu_max):
    print 'encoder input shape:', x._keras_shape
    assert x._keras_shape[1:] == (96, 96, 3)

    # 96, 96, 3
    y = Conv2D(64,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(x)
    y = BN(mode=2, axis=3)(y)
    # 48, 48, 64
    y = Conv2D(128,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 24, 24, 128
    y = Conv2D(256,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 12, 12, 256
    y = Conv2D(512,
               3,
               3,
               activation='relu',
               border_mode='same',
               subsample=(2, 2))(y)
    y = BN(mode=2, axis=3)(y)
    # 6, 6, 512

    assert y._keras_shape[1:] == (6, 6, 512), \
        '%s vs %s' % (y._keras_shape[1:], [6, 6, 512])
    y = Conv2D(LATENT_DIM,
               6,
               6,
               activation='linear',
               border_mode='same',
               subsample=(6, 6))(y)
    # 1, 1, LATENT_DIM
    if not relu_max:
        print 'add noise and pretend relu_max will be:', RELU_MAX
        y = GaussianNoise(0.2 * RELU_MAX)(y)

    y = Activation(utils.relu_n(relu_max))(y)
    if relu_max:
        print 'relu_max:', relu_max
        y = Activation(utils.scale_down(relu_max))(y)
        # y in [0, 1]

    y = Reshape((LATENT_DIM, ))(y)  # or Reshape([-1])(y) ?
    # LATENT_DIM
    return y
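A minimal usage sketch for the encoder above, assuming the Keras 1 functional API this snippet targets and that LATENT_DIM, RELU_MAX, utils, and the layer imports are already in scope; relu_max=6 is an illustrative value, not one from the source project:

# hypothetical wiring of the encoder into a standalone model
x = Input(shape=(96, 96, 3))
z = encode(x, relu_max=6)
encoder_model = Model(input=x, output=z)  # Keras 1 Model signature
encoder_model.summary()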
Code example #2
def basic_model():  # dense stack: 2048 -> 1024 -> 512 -> 1
    model = Sequential()
    model.add(Dense(2048, input_shape=(90, )))
    model.add(Activation('relu'))
    model.add(Dropout(0.1))

    model.add(Dense(1024))
    model.add(BN())
    model.add(GN(0.1))
    model.add(Activation('relu'))

    model.add(Dense(512))
    model.add(BN())
    model.add(GN(0.1))
    model.add(Activation('relu'))

    model.add(Dense(1))
    model.add(Activation('relu'))
    model.summary()

    adam = keras.optimizers.Adam(lr=0.01,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0,
                                 amsgrad=False)

    model.compile(loss='mape', optimizer=adam, metrics=['mse'])
    return model
Code example #3
def identity_block(x, kernel_size, filters, dilation, pad):

    filters_1, filters_2, filters_3 = filters
    x_shortcut = x

    # stage 1
    x = Conv2D(filters=filters_1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid')(x)
    x = BN(axis=-1)(x)
    x = Activation('relu')(x)

    # stage 2
    x = ZeroPadding2D(padding=pad)(x)
    x = Conv2D(filters=filters_2,
               kernel_size=kernel_size,
               strides=(1, 1),
               dilation_rate=dilation)(x)
    x = BN(axis=-1)(x)
    x = Activation('relu')(x)

    # stage 3
    x = Conv2D(filters=filters_3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid')(x)
    x = BN(axis=-1)(x)

    # stage 4
    x = Add()([x, x_shortcut])
    x = Activation('relu')(x)
    return x
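A hedged usage sketch: identity_block adds its input back onto the block output, so the incoming tensor must already have filters_3 channels. The tensor shape and argument values below are illustrative assumptions, not from the source project:

# illustrative only; the standard Keras functional imports are assumed
t = Input(shape=(56, 56, 256))
out = identity_block(t, kernel_size=(3, 3), filters=(64, 64, 256),
                     dilation=(1, 1), pad=(1, 1))
# pad=(1, 1) with a 3x3 kernel keeps the spatial size, so the Add() works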
Code example #4
File: functions.py  Project: joakiol/NeuralNetsUPV
def resNet(model, filters, firstStrides=(1, 1)):
    # Use firstStrides=(2, 2) to downsample when the number of filters increases.

    shortcut = model

    model = Conv2D(filters,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4),
                   kernel_size=(3, 3),
                   strides=firstStrides,
                   padding='same')(model)
    model = BN()(model)
    model = Activation('relu')(model)

    model = Conv2D(filters,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4),
                   kernel_size=(3, 3),
                   padding='same')(model)
    model = BN()(model)

    if firstStrides != (1, 1):
        shortcut = Conv2D(filters,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(1e-4),
                          kernel_size=(1, 1),
                          strides=firstStrides,
                          padding='same')(shortcut)
        shortcut = BN()(shortcut)

    model = keras.layers.add([shortcut, model])
    model = Activation('relu')(model)

    return model
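A minimal sketch of stacking these blocks, assuming the usual Keras 2 functional imports; note how the (2, 2) first stride both downsamples and triggers the 1x1 projection on the shortcut. Shapes and depths are illustrative:

# hypothetical stack: two stages of two residual blocks each
inp = Input(shape=(32, 32, 3))
net = Conv2D(16, (3, 3), padding='same')(inp)
net = resNet(net, 16)
net = resNet(net, 16)
net = resNet(net, 32, firstStrides=(2, 2))  # downsample and widen
net = resNet(net, 32)
model = Model(inp, net)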
Code example #5
    def _build(self, input_shape):
        discriminator, loss = self.parameters['discriminator']
        if discriminator.trainable:
            print("setting discriminator to untrainable")
            discriminator.trainable = False

        x = Input(input_shape)  # assumes zero vector
        generated = Sequential([
            # add uniform noise to the (zero) input; a lambda cannot contain
            # a 'return' statement, so the expression form is required
            Lambda(lambda z: z + K.random_uniform(shape=input_shape)),
            Dense(self.parameters['layer'], activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(self.parameters['layer'], activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(self.parameters['layer'], activation=self.parameters['activation']),
            BN(),
            Dropout(self.parameters['dropout']),
            Dense(np.prod(input_shape), activation="sigmoid"),
            Reshape(input_shape)
        ])(x)

        discriminator_output = discriminator(generated)
        
        self._discriminator = discriminator
        self._generator     = Model(x, generated)
        self.net            = Model(x, discriminator_output)
        self.loss           = loss
Code example #6
def Build_SAE(input_shape, n_class):
    x_in = layers.Input(shape=input_shape)
    x = layers.Dense(500)(x_in)
    x = ELU(alpha=0.5)(x)
    x = BN()(x)

    x = layers.Dense(1000)(x)
    x = ELU(alpha=0.5)(x)
    x = BN()(x)

    x = layers.Dense(1000)(x)
    x = ELU(alpha=0.5)(x)
    x = BN()(x)

    x = layers.Dense(100)(x)
    x = ELU(alpha=0.5)(x)
    x = BN()(x)

    x1 = layers.Dense(5)(x)
    x2 = ELU(alpha=0.5)(x1)
    x2 = BN()(x2)

    output = layers.Dense(n_class, activation='softmax')(x2)

    model = models.Model(x_in, output)
    model_fe = models.Model(x_in, x1)
    return model, model_fe
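Usage sketch under stated assumptions: Build_SAE returns both the full classifier and a second model exposing the 5-unit pre-activation layer x1 as features; the two models share weights, so training the first also trains the feature extractor. The input shape, class count, and array X below are hypothetical:

# illustrative only
model, model_fe = Build_SAE(input_shape=(784,), n_class=10)
model.compile(optimizer='adam', loss='categorical_crossentropy')
# after fitting 'model', reuse the shared layers for feature extraction:
features = model_fe.predict(X)  # shape (len(X), 5)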
Code example #7
def _residual_drop(x, input_shape, output_shape, strides=(1, 1)):
  global add_tables
  #nb_filter = output_shape[0]
  nb_filter = 32
  print(nb_filter)
  print(x.shape)
  conv = Convolution2D(nb_filter, (3, 3), strides=strides, padding="same", kernel_regularizer=L2(weight_decay))(x)
  conv = BN(axis=1)(conv)
  conv = Activation("relu")(conv)
  conv = Convolution2D(nb_filter, (3, 3), padding="same", kernel_regularizer=L2(weight_decay))(conv)
  conv = BN(axis=1)(conv)
  if strides[0] >= 2:
      x = AveragePooling2D(strides)(x)
  if (output_shape[0] - input_shape[0]) > 0:
      pad_shape = (1,
                   output_shape[0] - input_shape[0],
                   output_shape[1],
                   output_shape[2])
      padding = K.zeros(pad_shape)
      padding = K.repeat_elements(padding, K.shape(x)[0], axis=0)
      x = Lambda(lambda y: K.concatenate([y, padding], axis=1),
                 output_shape=output_shape)(x)
  _death_rate = K.variable(death_rate)
  scale = K.ones_like(conv) - _death_rate
  conv = Lambda(lambda c: K.in_test_phase(scale * c, c),
                output_shape=output_shape)(conv)
  print(x.shape)
  print(conv.shape)
  out = add([x, conv])  # merge the shortcut and the residual branch
  out = Activation("relu")(out)
  gate = K.variable(1, dtype="uint8")
  add_tables += [{"death_rate": _death_rate, "gate": gate}]
  return Lambda(lambda tensors: K.switch(gate, tensors[0], tensors[1]),
                output_shape=output_shape)([out, x])
Code example #8
def encode(x, use_noise, relu_max):
    print 'encoder input shape:', x._keras_shape
    assert x._keras_shape[1:] == (28, 28, 1)

    # 28, 28, 1
    y = Conv2D(20, 5, 5, activation='relu', border_mode='same', subsample=(2,2))(x)
    y = BN(mode=2, axis=3)(y)
    # 14, 14, 20
    y = Conv2D(40, 3, 3, activation='relu', border_mode='same', subsample=(2,2))(y)
    y = BN(mode=2, axis=3)(y)
    # 7, 7, 40
    print 'pre_fc shape:', y._keras_shape
    latent_dim = 80
    y = Conv2D(latent_dim, 7, 7, activation='linear',
               border_mode='same', subsample=(7,7))(y)
    # 1, 1, latent_dim
    if use_noise and not relu_max:
        print 'add noise and pretend relu_max will be:', RELU_MAX
        y = GaussianNoise(0.2 * RELU_MAX)(y)

    y = Activation(utils.relu_n(relu_max))(y)
    if relu_max:
        print 'relu max:', relu_max
        y = Activation(utils.scale_down(relu_max))(y)
        # y in [0, 1]
        if use_noise:
            y = GaussianNoise(0.2)(y)
            y = Activation('relu')(y)
    y = Reshape((latent_dim,))(y)
    # 80
    return y
Code example #9
File: cifar_cnn3.py  Project: hhc2tech/AIprojects
def setModel(num_classes, shape):
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=shape))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(512, (3, 3), padding='same'))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    sgd = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
Code example #10
def model_keras():
    # build the model
    model = Sequential()
    # first layer
    model.add(Dense(128, input_shape=(87, )))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    # second layer
    model.add(Dense(256))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    # third layer
    model.add(Dense(256))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    # output layer
    model.add(Dense(1))
    model.add(GN(0.3))
    model.add(Activation('relu'))

    # compile the model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
Code example #11
def cmodel():
    model = Sequential()

    # Dense 1
    model.add(Dense(1024, input_shape=(90, )))
    model.add(Activation('relu'))
    model.add(Dropout(0.1))

    model.add(Dense(1024))
    model.add(BN())
    model.add(GN(0.1))
    model.add(Activation('relu'))

    model.add(Dense(512))
    model.add(BN())
    model.add(GN(0.1))
    model.add(Activation('relu'))

    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.summary()

    sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)  # lr=0.0 leaves the weights unchanged during training
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
Code example #12
def LSTM_Model():
    model = Sequential()
    model.add(Conv2D(30, kernel_size=(5,5), strides=(1,1), activation='relu', input_shape=(374, 324, 3)))
    model.add(BN())
    model.add(Conv2D(30, kernel_size=(5,5), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2)))
    model.add(Reshape((1,672)))
    model.add(LSTM(64, activation='tanh', return_sequences=True))
    model.add(LSTM(64, activation='tanh', return_sequences=True))
    model.add(LSTM(64*3, activation='tanh', return_sequences=True))
    model.add(Reshape((8,8,3)))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(UpSampling2D(size=(2,2)))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(UpSampling2D(size=(2,2)))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(UpSampling2D(size=(2,2)))
    model.add(Conv2DTranspose(16, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(Conv2DTranspose(1, kernel_size=(3,3),
        strides=1, activation='relu'))
    model.add(UpSampling2D(size=(2,2)))
    model.compile(optimizer=opt, loss='mse', metrics=['accuracy'])  # 'opt' must be defined at module level
    #model.summary()

    return model
Code example #13
def encoder(input_dim, dims, batch_norm):
    nb_layers = len(dims)
    x = Input(shape=(input_dim, ))
    h = x
    for i in range(nb_layers):
        h = PReLU()(Dense(dims[i])(h))
        if batch_norm:
            h = BN()(h)
    return Model(inputs=[x], outputs=[h])
Code example #14
def setModel(num_classes):
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(Dense(512))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(Dense(num_classes, activation='softmax'))
    sgd=SGD(lr=0.01, decay=1e-6, momentum=0.9)
    model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
    return model
Code example #15
def decoder(output_dim, z_dim, dims, batch_norm):
    x = Input(shape=(z_dim, ))
    nb_layers = len(dims)
    h = x
    for i in range(nb_layers):
        h = PReLU()(Dense(dims[-(i + 1)])(h))
        if batch_norm:
            h = BN()(h)
    recon = Dense(output_dim, activation='sigmoid')(h)
    return Model(inputs=[x], outputs=[recon])
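A minimal sketch tying the encoder (example #13) and decoder (example #15) into an autoencoder; the dims list, loss, and optimizer are illustrative assumptions:

# hypothetical autoencoder wiring
dims = [500, 250, 50]
enc = encoder(input_dim=784, dims=dims, batch_norm=True)                  # 784 -> 50
dec = decoder(output_dim=784, z_dim=50, dims=dims[:-1], batch_norm=True)  # 50 -> 784
inp = Input(shape=(784,))
autoencoder = Model(inp, dec(enc(inp)))
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')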
Code example #16
File: functions.py  Project: joakiol/NeuralNetsUPV
def bottleneck(model,
               filters_low,
               filters_high,
               upscale=False,
               firstStrides=(1, 1)):
    # Use firstStrides=(2, 2) when increasing the number of filters.
    # Use upscale=True for the first block, so the shortcut is projected.

    shortcut = model

    model = Conv2D(filters_low,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4),
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same')(model)
    model = BN()(model)
    model = Activation('relu')(model)

    model = Conv2D(filters_low,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4),
                   kernel_size=(3, 3),
                   strides=firstStrides,
                   padding='same')(model)
    model = BN()(model)
    model = Activation('relu')(model)

    model = Conv2D(filters_high,
                   kernel_initializer='he_normal',
                   kernel_regularizer=l2(1e-4),
                   kernel_size=(1, 1),
                   strides=(1, 1),
                   padding='same')(model)
    model = BN()(model)
    model = Activation('relu')(model)

    if upscale or firstStrides != (1, 1):
        shortcut = Conv2D(filters_high,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(1e-4),
                          kernel_size=(1, 1),
                          strides=firstStrides,
                          padding='same')(shortcut)
        shortcut = BN()(shortcut)

    model = keras.layers.add([shortcut, model])
    model = Activation('relu')(model)

    return model
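A hedged sketch of a ResNet-50-style stage built from this helper; the first call uses upscale=True to project the shortcut when the channel count first changes, and the shapes are illustrative:

# illustrative stage: 64 -> 256 channels, then two identity-style blocks
net = Input(shape=(56, 56, 64))
stage = bottleneck(net, 64, 256, upscale=True)
stage = bottleneck(stage, 64, 256)
stage = bottleneck(stage, 64, 256)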
Code example #17
def conv_bn_relu(input, filters, kernel_size=3):

    conv = Conv2D(filters, (kernel_size, kernel_size), padding='same')(input)
    conv = BN(axis=-1)(conv)
    conv = Activation('relu')(conv)

    return conv
Code example #18
def Attention_UNet(input_shape):

    inputs = Input(input_shape)

    # Downsampling layers
    # DownRes 1, double residual convolution + pooling
    conv_1 = res_block(inputs, 32, 3)
    pool_1 = MaxPooling2D(pool_size=(2, 2))(conv_1)

    # DownRes 2
    conv_2 = res_block(pool_1, 64, 3)
    pool_2 = MaxPooling2D(pool_size=(2, 2))(conv_2)

    # DownRes 3
    conv_3 = res_block(pool_2, 128, 3)
    pool_3 = MaxPooling2D(pool_size=(2, 2))(conv_3)

    # DownRes 4
    conv_4 = res_block(pool_3, 256, 3)
    pool_4 = MaxPooling2D(pool_size=(2, 2))(conv_4)

    # DownRes 5, convolution only
    conv_5 = res_block(pool_4, 512, 3)

    # Upsampling layers
    # UpRes 6, attention gated concatenation + upsampling + double residual convolution
    gating_4 = conv_bn_relu(conv_5, 256, kernel_size=1)
    attention_4 = attention_block(conv_4, gating_4, 256)
    up_4 = UpSampling2D(size=(2, 2))(conv_5)
    up_4 = concatenate([up_4, attention_4], axis=-1)
    up_conv_4 = res_block(up_4, 256, 3)

    # UpRes 7
    gating_3 = conv_bn_relu(up_conv_4, 128, kernel_size=1)
    attention_3 = attention_block(conv_3, gating_3, 128)
    up_3 = UpSampling2D(size=(2, 2))(up_conv_4)
    up_3 = concatenate([up_3, attention_3], axis=-1)
    up_conv_3 = res_block(up_3, 128, 3)

    # UpRes 8
    gating_2 = conv_bn_relu(up_conv_3, 64, kernel_size=1)
    attention_2 = attention_block(conv_2, gating_2, 64)
    up_2 = UpSampling2D(size=(2, 2))(up_conv_3)
    up_2 = concatenate([up_2, attention_2], axis=-1)
    up_conv_2 = res_block(up_2, 64, 3)

    # UpRes 9
    gating_1 = conv_bn_relu(up_conv_2, 32, kernel_size=1)
    attention_1 = attention_block(conv_1, gating_1, 32)
    up_1 = UpSampling2D(size=(2, 2))(up_conv_2)
    up_1 = concatenate([up_1, attention_1], axis=-1)
    up_conv_1 = res_block(up_1, 32, 3)

    # 1*1 convolutional layers, valid padding
    output = Conv2D(1, kernel_size=(1, 1))(up_conv_1)
    output = BN(axis=-1)(output)
    output = Activation('sigmoid')(output)

    model = Model(inputs=inputs, outputs=output, name='Attention_UNet')
    return model
Code example #19
File: Unet.py  Project: xhongz/cancer
def unet_conv(x_in, nf, rep=1):
    # 'c' is assumed to be a module-level constant naming the channel axis
    x_out = BN(axis=c)(Convolution2D(nf,
                                     3,
                                     3,
                                     activation='relu',
                                     border_mode='same')(x_in))
    #x_out = LeakyReLU(0.1)(x_out);
    if rep > 1:
        for i in range(rep - 1):
            x_out = BN(axis=c)(Convolution2D(nf,
                                             3,
                                             3,
                                             activation='relu',
                                             border_mode='same')(x_out))
            #x_out = LeakyReLU(0.1)(x_out);
    return x_out
Code example #20
def build_model(maxlen=None, feats=None):
    print('Build model...')
    situations = 4
    vector_size = 256
    model_text = Sequential()
    model_text.add(
        Reshape((
            maxlen,
            vector_size,
        ), input_shape=(maxlen, vector_size)))
    model_text.add(GRU(int(128 * 10)))
    model_text.add(
        BN(epsilon=0.001,
           mode=0,
           axis=-1,
           momentum=0.99,
           weights=None,
           beta_init='zero',
           gamma_init='one',
           gamma_regularizer=None,
           beta_regularizer=None))
    model_text.add(Dropout(0.5))
    final_model = Sequential()
    final_model.add(model_text)
    final_model.add(Dense(len(feats)))
    final_model.add(Activation('linear'))
    optimizer = Adam()
    #reduce_lr = ReduceLROnPlateau(monitor='categorical_crossentropy', factor=0.2, patience=5, min_lr=0.0001)
    #final_model.compile(loss='poisson', optimizer=optimizer)
    final_model.compile(loss='mean_squared_error', optimizer=optimizer)
    return final_model
Code example #21
def attention_block(x, gating, inter_channel):
    '''
        inter_channel: intermediate channel
    '''
    shape_x = K.int_shape(x)
    shape_g = K.int_shape(gating)

    theta_x = Conv2D(inter_channel, (2, 2), strides=(2, 2), padding='same')(x)
    shape_theta_x = K.int_shape(theta_x)

    phi_g = Conv2D(inter_channel, (1, 1), strides=(1, 1),
                   padding='same')(gating)
    upsample_g = UpSampling2D(size=(shape_theta_x[1] // shape_g[1],
                                    shape_theta_x[2] // shape_g[2]))(phi_g)

    relu_xg = Activation('relu')(add([theta_x, upsample_g]))

    psi = Conv2D(1, (1, 1), strides=(1, 1), padding='same')(relu_xg)
    sigmoid_psi = Activation('sigmoid')(psi)
    shape_sigmoid_psi = K.int_shape(sigmoid_psi)

    upsample_psi = UpSampling2D(size=(shape_x[1] // shape_sigmoid_psi[1],
                                      shape_x[2] //
                                      shape_sigmoid_psi[2]))(sigmoid_psi)
    upsample_psi = expend_as(upsample_psi, shape_x[3])

    out = multiply([upsample_psi, x])
    out = Conv2D(shape_x[3], kernel_size=(1, 1), padding='same')(out)
    out = BN(axis=-1)(out)

    return out
Code example #22
def bn_act(x, activation='relu'):
    l = BN()(x)
    if activation == 'prelu':
        l = PReLU()(l)
    else:
        l = Activation(activation)(l)
    return l
Code example #23
def bottleneck_identity_block(input, kernel_size, filters, pad):

    filters_1, filters_2, filters_3 = filters
    x_shortcut = input

    # stage 1
    x = conv_bn_relu(input,
                     filters=filters_1,
                     kernel_size=(1, 1),
                     strides=(1, 1),
                     padding='valid')

    # stage 2
    x = ZeroPadding2D(padding=pad)(x)
    x = conv_bn_relu(x,
                     filters=filters_2,
                     kernel_size=kernel_size,
                     strides=(1, 1),
                     padding='valid')

    # stage 3
    x = Conv2D(filters=filters_3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='valid')(x)
    x = BN(axis=-1)(x)

    # stage 4
    x = add([x, x_shortcut])
    x = Activation(activation='relu')(x)
    return x
Code example #24
def build_model(maxlen=None, feats=None):
    print('Build model...')
    situations = 4
    vector_size = 256
    model_text = Sequential()
    model_text.add(
        Reshape((
            maxlen,
            vector_size,
        ), input_shape=(maxlen, vector_size)))
    model_text.add(GRU(int(128 * 8)))
    model_text.add(
        BN(epsilon=0.001,
           mode=0,
           axis=-1,
           momentum=0.99,
           weights=None,
           beta_init='zero',
           gamma_init='one',
           gamma_regularizer=None,
           beta_regularizer=None))
    model_text.add(Dropout(0.5))
    final_model = Sequential()
    final_model.add(model_text)
    final_model.add(Dense(len(feats)))
    final_model.add(Activation('softmax'))
    optimizer = Adam()
    final_model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    return final_model
Code example #25
def model_architecture(img_rows, img_cols, img_channels, nb_classes):
    # defines the architecture of the CNN
    
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True, input_shape=(img_channels,img_rows, img_cols)))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.00001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(32, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.25))

    model.add(Convolution2D(96, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(MaxPooling2D(pool_size=(2, 2), strides = (2,2)))
    model.add(Convolution2D(128, 3, 3, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.5))

    model.add(Convolution2D(512, 1, 1, activation='relu', border_mode='same',init='orthogonal', bias = True))

    model.add(BN(axis=1, momentum=0.99, epsilon=0.001))
    model.add(Dropout(0.5))

    model.add(Convolution2D(2, 1, 1, activation='relu', border_mode='same',init='orthogonal', bias = True))
    model.add(GlobalAveragePooling2D(dim_ordering='default'))

    model.add(Activation('softmax'))
    model.summary()
    return model
Code example #26
def buildDecoder3d(model, filters, filtersize=3):
    model.add(
        Conv3D(filters, (filtersize, filtersize, filtersize), padding='same'))
    model.add(BN())
    model.add(Dropout(0.2))
    model.add(Activation('relu'))
    model.add(UpSampling3D(size=(2, 2, 2)))
    return model
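Usage sketch under stated assumptions (a Sequential model whose current output is a 3-D feature volume); the input shape and filter counts are illustrative:

# hypothetical decoder: each call doubles the spatial resolution
model = Sequential()
model.add(Conv3D(64, (3, 3, 3), padding='same', input_shape=(8, 8, 8, 1)))
model = buildDecoder3d(model, 32)  # -> 16 x 16 x 16
model = buildDecoder3d(model, 16)  # -> 32 x 32 x 32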
Code example #27
def CBGN(model, filters, lname, ishape=0):
    if (ishape != 0):
        model.add(Conv2D(filters, (3, 3), padding='same', input_shape=ishape))
    else:
        model.add(Conv2D(filters, (3, 3), padding='same'))

    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))

    model.add(Conv2D(filters, (3, 3), padding='same'))
    model.add(BN())
    model.add(GN(0.3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), name=lname))

    return model
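A minimal sketch of chaining CBGN blocks into a VGG-style feature extractor; the shapes, pool-layer names, and class count are illustrative assumptions:

# hypothetical CIFAR-scale stack
model = Sequential()
model = CBGN(model, 32, 'pool1', ishape=(32, 32, 3))
model = CBGN(model, 64, 'pool2')
model = CBGN(model, 128, 'pool3')
model.add(Flatten())
model.add(Dense(10, activation='softmax'))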
Code example #28
def getUnet3D(img_frame, img_rows, img_cols, img_channels, n_cls):
    #keras.set_floatx('float16')
    input_shape = (img_frame, None, None, img_channels)  # input_shape = (img_frame, img_rows, img_cols, img_channels)
    inputs = Input(input_shape)
    print(inputs)
        
    conv1 = Conv3D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = BN(axis=-1)(conv1)
    conv1 = Conv3D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    conv1 = BN(axis=-1)(conv1)
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
        
    conv2 = Conv3D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    conv2 = BN(axis=-1)(conv2)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
        
    conv3 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv3D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    conv3 = BN(axis=-1)(conv3)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
        
    conv4 = Conv3D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv3D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    conv4 = BN(axis=-1)(conv4)
    print("conv4 shape:",conv4.shape)
    drop4 = Dropout(0.5)(conv4)
    print("drop4 shape:",drop4.shape)
        
    up5 = Conv3D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = (2,2,2))(drop4))
    print("up5 shape:",up5.shape)
    merge5 = Concatenate(axis=4)([conv3,up5])
    conv5 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge5)
    conv5 = Conv3D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    conv5 = BN(axis=-1)(conv5)
    print("conv5 shape:",conv5.shape)
        
    up6 = Conv3D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = (2,2,2))(conv5))
    print("up6 shape:",up6.shape)
    merge6 = Concatenate(axis=4)([conv2,up6])
    conv6 = Conv3D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv3D(32, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
    conv6 = BN(axis=-1)(conv6)
    print("conv6 shape:",conv6.shape)
        
    up7 = Conv3D(32, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling3D(size = (2,2,2))(conv6))
    print("up7 shape:",up7.shape)
    merge7 = Concatenate(axis=4)([conv1,up7])
    conv7 = Conv3D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv3D(16, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
    conv7 = BN(axis=-1)(conv7)
    preds = Conv3D(n_cls, 1, kernel_initializer = 'he_normal')(conv7)
    print("preds shape:",preds.shape)
    
    return inputs,preds
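getUnet3D returns the input tensor and raw per-class outputs rather than a compiled model. A hedged sketch of wrapping them follows; the sizes, optimizer, and loss are placeholders, and note that preds carries no softmax:

# illustrative only
inputs, preds = getUnet3D(img_frame=32, img_rows=128, img_cols=128,
                          img_channels=1, n_cls=2)
model = Model(inputs=inputs, outputs=preds)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')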
Code example #29
def Mask_Gen():
    model = Sequential()
    model.add(
        Conv2D(30,
               kernel_size=(5, 5),
               strides=(1, 1),
               padding='valid',
               activation='relu',
               input_shape=(374, 324, 3)))
    model.add(BN())
    model.add(Conv2D(30, kernel_size=(5, 5), strides=1, activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Conv2D(16, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(BN())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Conv2D(16, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(BN())
    model.add(Conv2D(16, kernel_size=(3, 3), strides=1, activation='relu'))
    model.add(Dropout(0.5))
    model.add(BN())
    model.add(Conv2D(1, kernel_size=(3, 3), strides=1, activation='relu'))
    # model.add(Reshape((81,69)))
    model.compile(loss=dice_coef, optimizer='adam', metrics=['accuracy'])
    model.summary()

    return model
Code example #30
def build_residual_drop():
  """ ANCHOR """
  inputs = Input(shape=(3, 224, 224))
  net = Convolution2D(16, (3, 3), padding="same", kernel_regularizer=L2(weight_decay))(inputs)
  net = BN(axis=1)(net)
  net = Activation("relu")(net)
  #for i in range(18):
  net = _residual_drop(net, input_shape=(16, 32, 32), output_shape=(16, 32, 32))
  net = _residual_drop(net, input_shape=(16, 32, 32), output_shape=(16, 32, 32))