Code Example #1
File: m.py Project: cottrell/notebooks
    def call(self, inputs):
        # Assumed context: kl = keras.layers, K = keras.backend,
        # ka = keras.activations; X is (batch, x_feats) and Z is
        # (batch, z_size, z_feats).
        X, Z = inputs
        # NOTE: building Dense layers inside call() allocates fresh,
        # untrained weights on every invocation (see the sketch below).
        X_layer = kl.Dense(16, activation='linear')(X)
        Z_dense = kl.Dense(16, activation='linear')
        combined = list()
        # Iterate over the z axis (axis 1); Z.shape[0] is the batch
        # dimension, which must not be used as the loop bound here.
        for i in range(Z.shape[1]):
            z = Z_dense(Z[:, i])
            l = X_layer + z
            l = K.expand_dims(l, axis=1)
            combined.append(l)
        combined = kl.concatenate(combined, axis=1)
        # combined is now shape (batch_size, z_size, 16)
        l = ka.relu(combined)
        l = kl.Dense(16, activation='relu')(l)
        l = kl.Dense(16, activation='relu')(l)
        l = kl.Dense(16, activation='linear')(l)
        return l
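Because the Dense layers above are created inside call, the idiomatic structure creates them once in __init__. A minimal sketch of that structure (class and attribute names are illustrative, not from the original; Z is assumed to have a static second dimension):

import tensorflow as tf
from tensorflow import keras

class XZCombiner(keras.Model):  # illustrative name
    def __init__(self):
        super().__init__()
        self.x_dense = keras.layers.Dense(16, activation='linear')
        self.z_dense = keras.layers.Dense(16, activation='linear')
        self.head = keras.Sequential([
            keras.layers.Dense(16, activation='relu'),
            keras.layers.Dense(16, activation='relu'),
            keras.layers.Dense(16, activation='linear'),
        ])

    def call(self, inputs):
        X, Z = inputs
        x = self.x_dense(X)                                # (batch, 16)
        # One shared Dense over every z slice, stacked along a new axis 1.
        zs = [self.z_dense(Z[:, i]) for i in range(Z.shape[1])]
        combined = tf.stack([x + z for z in zs], axis=1)   # (batch, z_size, 16)
        return self.head(tf.nn.relu(combined))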
Code Example #2
    # Assumed context: from tensorflow.keras import layers, models
    def create_model(self, img_shape, num_class, d=32):

        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(d, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(pool4)
        conv5 = layers.Conv2D(d * 16, (3, 3),
                              activation='relu',
                              padding='same')(conv5)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(d * 8, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(d * 4, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(d * 2, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(d, (3, 3), activation='relu',
                              padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]),
                                              (cw[0], cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1), activation="sigmoid")(conv9)

        model = models.Model(inputs=inputs, outputs=conv10)

        return model
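The method relies on self.get_crop_shape, which isn't shown. A sketch of what such a helper might look like, inferred from the call sites above (it must return per-side (height, width) amounts reconciling the spatial sizes of its two arguments; static shapes are assumed):

    def get_crop_shape(self, target, refer):
        # Height/width surplus of `target` over `refer`...
        ch = int(target.shape[1]) - int(refer.shape[1])
        cw = int(target.shape[2]) - int(refer.shape[2])
        # ...split into (before, after) amounts usable by both
        # Cropping2D and ZeroPadding2D.
        return (ch // 2, ch - ch // 2), (cw // 2, cw - cw // 2)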
Code Example #3
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Input, UpSampling2D

# unet_conv_block is a project-local helper; a sketch follows the example.


def recon_generator_model(input_shape,
                          n_layers_unet,
                          dilation_rate,
                          use_cnnt=False):
    inputs = Input(shape=input_shape)
    label2idx = {'input': 0}

    _tmp = Conv2D(filters=48,
                  kernel_size=17,
                  dilation_rate=dilation_rate,
                  padding='same',
                  activation=None,
                  use_bias=True)(inputs)
    _tmp = tfa.layers.InstanceNormalization(
        axis=3,
        center=True,
        scale=True,
        beta_initializer="random_uniform",
        gamma_initializer="random_uniform")(_tmp)

    _tmp = inputs_concat = layers.LeakyReLU(alpha=0.2)(_tmp)
    ly_outs = [
        _tmp,
    ]

    _tmp = unet_conv_block(ly_outs[-1], 48, dilation_rate)
    ly_outs.append(_tmp)
    label2idx['box1_out'] = len(ly_outs) - 1

    for ly, nch in zip(range(2, n_layers_unet + 1),
                       (64, 96, 192, 384, 512, 512, 512, 512)):
        _tmp = Conv2D(filters=nch, kernel_size=3, strides=2,
                      padding='same', activation=None, use_bias=True)(ly_outs[-1])

        _tmp = tfa.layers.InstanceNormalization(
            axis=3,
            center=True,
            scale=True,
            beta_initializer="random_uniform",
            gamma_initializer="random_uniform")(_tmp)

        _tmp = layers.LeakyReLU(alpha=0.2)(_tmp)

        _tmp = unet_conv_block(_tmp, nch, dilation_rate)

        ly_outs.append(_tmp)
        label2idx['box%d_out' % (ly)] = len(ly_outs) - 1

    # intermediate layers
    _tmp = Conv2D(filters=ly_outs[-1].shape[-1], kernel_size=3, strides=2,
                  padding='same', activation=None, use_bias=True)(ly_outs[-1])

    _tmp = tfa.layers.InstanceNormalization(
        axis=3,
        center=True,
        scale=True,
        beta_initializer="random_uniform",
        gamma_initializer="random_uniform")(_tmp)

    _tmp = intermediate_layer = layers.LeakyReLU(alpha=0.2)(_tmp)

    ly_outs.append(_tmp)

    for ly, nch in zip(range(1, n_layers_unet + 1),
                       (512, 512, 512, 384, 192, 96, 64, 48, 32)):

        if use_cnnt:
            _tmp = Conv2DTranspose(filters=ly_outs[-1].shape[-1], activation=None,
                                   kernel_size=4, strides=(2, 2), padding='same')(ly_outs[-1])
            _tmp = layers.LeakyReLU(alpha=0.2)(_tmp)
        else:
            _tmp = UpSampling2D(size=(2, 2),
                                interpolation='bilinear')(ly_outs[-1])
        _tmp = layers.concatenate(
            [ly_outs[label2idx['box%d_out' % (n_layers_unet - ly + 1)]], _tmp])

        _tmp = unet_conv_block(_tmp, nch, dilation_rate)
        ly_outs.append(_tmp)

    _tmp = current_sinogram = Conv2D(filters=1,
                                     kernel_size=1,
                                     padding='same',
                                     activation=None,
                                     use_bias=True)(_tmp)

    return tf.keras.models.Model(inputs, [_tmp, intermediate_layer])
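A hypothetical sketch of the unet_conv_block helper used above, mirroring the caller's own convolution/instance-norm/LeakyReLU style (the exact original is not shown, so this is only an assumption; it reuses the imports added above):

def unet_conv_block(x, n_filters, dilation_rate):
    # Two dilated 3x3 convolutions, each followed by instance
    # normalization and LeakyReLU.
    for _ in range(2):
        x = Conv2D(filters=n_filters, kernel_size=3,
                   dilation_rate=dilation_rate, padding='same',
                   activation=None, use_bias=True)(x)
        x = tfa.layers.InstanceNormalization(
            axis=3, center=True, scale=True,
            beta_initializer="random_uniform",
            gamma_initializer="random_uniform")(x)
        x = layers.LeakyReLU(alpha=0.2)(x)
    return x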
Code Example #4
def get_unet_batch(base_dense, img_w, img_h, img_ch, dropout=False, dr=0.2):
    # Assumed imports: from tensorflow.keras.layers import (Input, MaxPooling2D,
    #     Dropout, Conv2D, Conv2DTranspose, concatenate)
    # and from tensorflow.keras.models import Model.
    # conv_block is a project-local helper; a sketch follows the example.
    input_size = (img_w, img_h, img_ch)
    input_layer = Input(shape=input_size, name='input_layer')

    if dropout:
        conv1 = conv_block(input_layer, base_dense, BatchNorm=True)
        pool1 = MaxPooling2D((2, 2))(conv1)
        pool1 = Dropout(dr)(pool1)

        conv2 = conv_block(pool1, base_dense * 2, BatchNorm=True)
        pool2 = MaxPooling2D((2, 2))(conv2)
        pool2 = Dropout(dr)(pool2)

        conv3 = conv_block(pool2, base_dense * 4, BatchNorm=True)
        pool3 = MaxPooling2D((2, 2))(conv3)
        pool3 = Dropout(dr)(pool3)

        conv4 = conv_block(pool3, base_dense * 8, BatchNorm=True)
        pool4 = MaxPooling2D((2, 2))(conv4)
        pool4 = Dropout(dr)(pool4)

        #middle
        convm = conv_block(pool4, base_dense * 16, BatchNorm=True)

        #deconvolution
        deconv1 = Conv2DTranspose(base_dense * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(convm)
        uconv1 = concatenate([deconv1, conv4])
        uconv1 = Dropout(dr)(uconv1)
        uconv1 = conv_block(uconv1, base_dense * 8, BatchNorm=True)

        deconv2 = Conv2DTranspose(base_dense * 4, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv1)
        uconv2 = concatenate([deconv2, conv3])
        uconv2 = Dropout(dr)(uconv2)
        uconv2 = conv_block(uconv2, base_dense * 4, BatchNorm=True)

        deconv3 = Conv2DTranspose(base_dense * 2, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv2)
        uconv3 = concatenate([deconv3, conv2])
        uconv3 = Dropout(dr)(uconv3)
        uconv3 = conv_block(uconv3, base_dense * 2, BatchNorm=True)

        deconv4 = Conv2DTranspose(base_dense, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv3)
        uconv4 = concatenate([deconv4, conv1])
        uconv4 = Dropout(dr)(uconv4)
        uconv4 = conv_block(uconv4, base_dense, BatchNorm=True)

    else:
        conv1 = conv_block(input_layer, base_dense, BatchNorm=True)
        pool1 = MaxPooling2D((2, 2))(conv1)

        conv2 = conv_block(pool1, base_dense * 2, BatchNorm=True)
        pool2 = MaxPooling2D((2, 2))(conv2)

        conv3 = conv_block(pool2, base_dense * 4, BatchNorm=True)
        pool3 = MaxPooling2D((2, 2))(conv3)

        conv4 = conv_block(pool3, base_dense * 8, BatchNorm=True)
        pool4 = MaxPooling2D((2, 2))(conv4)

        #middle
        convm = conv_block(pool4, base_dense * 16, BatchNorm=True)

        #deconvolution
        deconv1 = Conv2DTranspose(base_dense * 8, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(convm)
        uconv1 = concatenate([deconv1, conv4])
        uconv1 = conv_block(uconv1, base_dense * 8, BatchNorm=True)

        deconv2 = Conv2DTranspose(base_dense * 4, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv1)
        uconv2 = concatenate([deconv2, conv3])
        uconv2 = conv_block(uconv2, base_dense * 4, BatchNorm=True)

        deconv3 = Conv2DTranspose(base_dense * 2, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv2)
        uconv3 = concatenate([deconv3, conv2])
        uconv3 = conv_block(uconv3, base_dense * 2, BatchNorm=True)

        deconv4 = Conv2DTranspose(base_dense, (3, 3),
                                  strides=(2, 2),
                                  padding="same",
                                  activation="relu")(uconv3)
        uconv4 = concatenate([deconv4, conv1])
        uconv4 = conv_block(uconv4, base_dense, BatchNorm=True)

    output_layer = Conv2D(1, (1, 1),
                          padding='same',
                          activation='sigmoid',
                          name='output_layer')(uconv4)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()

    return model
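A hypothetical sketch of the conv_block helper assumed above (the original is not shown; two 3x3 same-padded convolutions with optional batch normalization is the common pattern its call sites suggest):

from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D

def conv_block(x, n_filters, BatchNorm=False):
    # Two 3x3 convolutions, each optionally batch-normalized before ReLU.
    for _ in range(2):
        x = Conv2D(n_filters, (3, 3), padding='same')(x)
        if BatchNorm:
            x = BatchNormalization()(x)
        x = Activation('relu')(x)
    return x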
Code Example #5
File: train_source.py Project: bes82/Motion_Blur
def Phase1_Net(img_size, num_classes):
    # Assumed imports: from tensorflow.keras.layers import (Input, Conv2D,
    #     Conv2DTranspose, MaxPooling2D, BatchNormalization, Activation,
    #     concatenate)
    # plus from tensorflow.keras import activations, Model.
    inputs = Input(shape=img_size + (3, ))

    x = Conv2D(64, kernel_size=3, strides=(1, 1), padding="same")(inputs)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    previous_block_concatenate1 = x
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = Conv2D(128, kernel_size=3, strides=(1, 1), padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    previous_block_concatenate2 = x

    concate_block_num = 3
    for filters in [256, 512, 512]:
        x = Conv2D(filters, 3, strides=(1, 1), padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation(activations.relu)(x)
        x = Conv2D(filters, 3, strides=1, padding="same")(x)
        x = BatchNormalization()(x)
        x = Activation(activations.relu)(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
        # Skip tensors are stashed in module-level globals; this works but
        # leaks state between calls (a list-based sketch follows the example).
        globals()['previous_block_concatenate%s' % concate_block_num] = x
        concate_block_num = concate_block_num + 1
        print('No errors for filter size: ' + str(filters))

    x = Conv2D(512, 3, strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    x = Conv2D(512, 3, strides=1, padding="same")(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = Conv2DTranspose(256, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = concatenate([x, previous_block_concatenate5], axis=-1)

    x = Conv2DTranspose(256, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = concatenate([x, previous_block_concatenate4], axis=-1)

    x = Conv2DTranspose(128, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = concatenate([x, previous_block_concatenate3], axis=-1)

    x = Conv2DTranspose(64, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = concatenate([x, previous_block_concatenate2], axis=-1)

    x = Conv2DTranspose(32, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = Conv2DTranspose(64, 2, strides=(2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)

    x = concatenate([x, previous_block_concatenate1], axis=-1)

    x = Conv2D(32, 3, strides=(1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)
    x = Conv2D(num_classes, 3, strides=(1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation(activations.relu)(x)
    outputs = Conv2D(num_classes,
                     3,
                     strides=(1, 1),
                     activation='softmax',
                     padding='same',
                     name='sRBC_classes')(x)
    model = Model(inputs, outputs)

    return model
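A list avoids the globals() trick above while expressing the same skip-connection bookkeeping. A minimal self-contained sketch of that pattern (shapes and filter counts are illustrative, not the model above):

from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D,
                                     UpSampling2D, concatenate)

inputs = Input(shape=(64, 64, 3))
x = inputs
skips = []
for filters in [64, 128, 256]:
    x = Conv2D(filters, 3, padding='same', activation='relu')(x)
    skips.append(x)                     # record the skip tensor
    x = MaxPooling2D(2)(x)

for filters in [256, 128, 64]:
    x = UpSampling2D(2)(x)
    x = concatenate([x, skips.pop()], axis=-1)   # deepest skip first
    x = Conv2D(filters, 3, padding='same', activation='relu')(x)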
Code Example #6
File: cnn_bgru.py Project: pkkumar95/PSSP
testpssm_11 = pickle.load(open('./pickles/testpssm_casp11.pickle', 'rb'))
testlabel_11 = pickle.load(open('./pickles/testlabel_casp11.pickle', 'rb'))

with tf.device('/device:GPU:1'):
    # TF1-style session setup (these APIs live under tf.compat.v1 on TF2).
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.4
    K.tensorflow_backend.set_session(tf.Session(config=config))
    
    layer_size = 42
    
    #main_input = Input(shape=(700,20), name='main_input')
    #main_input = Embedding(output_dim=1, input_dim=20, input_length=700)(main_input)
    main_input = Input(shape=(700,21), name='main_input')
    aux_input = Input(shape=(700,21), name='aux_input')
    input_features = concatenate([main_input, aux_input], axis=-1, name='c1')
    
    c_input = Reshape((700,42,1))(input_features)
    
    c_output = Conv2D(layer_size, (3,3), activation='relu', padding='same', bias_regularizer=l2(0.001))(c_input)
    print('c_output: ', c_output.get_shape())
    
    m_output = MaxPooling2D((1,2), strides=None, padding='same')(c_output)
    print('m_output: ', m_output.get_shape())
    
    m_output = Reshape((700,42*21))(m_output)
    m_output = Dropout(0.2)(m_output)
    d_output = Dense(400, activation='relu')(m_output)
    
    # Bidirectional RNN with GRU cells (forward direction; see the note below)
    f1 = GRU(250, return_sequences=True, activation='tanh', recurrent_activation='sigmoid', dropout=0.2, recurrent_dropout=0.2)(d_output)
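
The excerpt ends after the forward GRU even though the comment promises a bidirectional RNN. A hedged sketch of one plausible continuation, not taken from the original file, using the Bidirectional wrapper (which also handles re-reversing the backward outputs):

    from keras.layers import Bidirectional
    bgru = Bidirectional(GRU(250, return_sequences=True, activation='tanh',
                             recurrent_activation='sigmoid',
                             dropout=0.2, recurrent_dropout=0.2),
                         merge_mode='concat')(d_output)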
Code Example #7
def get_unet_1024(input_shape=(1024, 1024, 3), num_classes=1):
    # Assumed imports: from tensorflow.keras.layers import (Input, Conv2D,
    #     BatchNormalization, Activation, MaxPooling2D, UpSampling2D, concatenate)
    # and from tensorflow.keras.models import Model.
    # bce_dice_loss and dice_coeff are project-local; sketches follow the example.
    inputs = Input(shape=input_shape)
    # 1024

    down0b = Conv2D(8, (3, 3), padding='same')(inputs)
    down0b = BatchNormalization()(down0b)
    down0b = Activation('relu')(down0b)
    down0b = Conv2D(8, (3, 3), padding='same')(down0b)
    down0b = BatchNormalization()(down0b)
    down0b = Activation('relu')(down0b)
    down0b_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0b)
    # 512

    down0a = Conv2D(16, (3, 3), padding='same')(down0b_pool)
    down0a = BatchNormalization()(down0a)
    down0a = Activation('relu')(down0a)
    down0a = Conv2D(16, (3, 3), padding='same')(down0a)
    down0a = BatchNormalization()(down0a)
    down0a = Activation('relu')(down0a)
    down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    # 256

    down0 = Conv2D(32, (3, 3), padding='same')(down0a_pool)
    down0 = BatchNormalization()(down0)
    down0 = Activation('relu')(down0)
    down0 = Conv2D(32, (3, 3), padding='same')(down0)
    down0 = BatchNormalization()(down0)
    down0 = Activation('relu')(down0)
    down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    # 128

    down1 = Conv2D(64, (3, 3), padding='same')(down0_pool)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(64, (3, 3), padding='same')(down1)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64

    down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2 = Conv2D(128, (3, 3), padding='same')(down2)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    # 32

    down3 = Conv2D(256, (3, 3), padding='same')(down2_pool)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3 = Conv2D(256, (3, 3), padding='same')(down3)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    # 16

    down4 = Conv2D(512, (3, 3), padding='same')(down3_pool)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4 = Conv2D(512, (3, 3), padding='same')(down4)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    # 8

    center = Conv2D(1024, (3, 3), padding='same')(down4_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(1024, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    # center

    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    # 16

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    # 32

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    # 64

    up1 = UpSampling2D((2, 2))(up2)
    up1 = concatenate([down1, up1], axis=3)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    # 128

    up0 = UpSampling2D((2, 2))(up1)
    up0 = concatenate([down0, up0], axis=3)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = Activation('relu')(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = Activation('relu')(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchNormalization()(up0)
    up0 = Activation('relu')(up0)
    # 256

    up0a = UpSampling2D((2, 2))(up0)
    up0a = concatenate([down0a, up0a], axis=3)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchNormalization()(up0a)
    up0a = Activation('relu')(up0a)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchNormalization()(up0a)
    up0a = Activation('relu')(up0a)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchNormalization()(up0a)
    up0a = Activation('relu')(up0a)
    # 512

    up0b = UpSampling2D((2, 2))(up0a)
    up0b = concatenate([down0b, up0b], axis=3)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchNormalization()(up0b)
    up0b = Activation('relu')(up0b)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchNormalization()(up0b)
    up0b = Activation('relu')(up0b)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchNormalization()(up0b)
    up0b = Activation('relu')(up0b)
    # 1024

    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0b)

    model = Model(inputs=inputs, outputs=classify)

    #model.compile(optimizer=RMSprop(lr=0.0001), loss=bce_dice_loss, metrics=[dice_coeff])
    model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coeff])

    return model
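The compile call references bce_dice_loss and dice_coeff, which aren't shown. Common definitions for these names, offered as a hedged sketch rather than the project's own code:

from tensorflow.keras import backend as K
from tensorflow.keras.losses import binary_crossentropy

def dice_coeff(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return ((2.0 * intersection + smooth) /
            (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))

def bce_dice_loss(y_true, y_pred):
    # Binary cross-entropy plus (1 - Dice), a common segmentation loss.
    return binary_crossentropy(y_true, y_pred) + (1.0 - dice_coeff(y_true, y_pred))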
Code Example #8
def train():
    # Assumed context: tensorflow as tf, numpy as np, cv2, and tf.keras
    # `layers`; the capitalized names (Image_path, Batch_size, L_node, ...)
    # are module-level constants defined elsewhere in the file.
    k = 0

    AUTOTUNE = tf.data.experimental.AUTOTUNE
    # Slice the input image path string, get a dataset about string
    path_image_ds = tf.data.Dataset.from_tensor_slices(Image_path)
    # Load all images into dataset
    image_ds = path_image_ds.map(read_and_load, num_parallel_calls=AUTOTUNE)
    # Slice the input label path string, get a dataset about string
    path_label_ds = tf.data.Dataset.from_tensor_slices(Label_path)
    # Load all labels into dataset
    label_ds = path_label_ds.map(read_and_load, num_parallel_calls=AUTOTUNE)
    # Pack images and labels together
    image_label_ds = tf.data.Dataset.zip((image_ds, label_ds))
    # Shuffle, Repeat, Batch operations
    ds = image_label_ds.shuffle(buffer_size=Buffer_size)
    # NOTE: repeat() without a count makes the dataset infinite, so the
    # inner `for data, label in ds` loop below never finishes on its own
    # and the outer epoch loop never advances past its first iteration.
    ds = ds.repeat()
    ds = ds.batch(Batch_size)
    ds = ds.prefetch(buffer_size=AUTOTUNE)

    g_optimizer = tf.keras.optimizers.Adam(learning_rate=Learning_rate_gen, beta_1=Beta_1, beta_2=Beta_2, epsilon=E)
    d_optimizer = tf.keras.optimizers.Adam(learning_rate=Learning_rate_disc, beta_1=Beta_1, beta_2=Beta_2, epsilon=E)

    # Build Discriminator
    discriminator = build_discriminator()
    discriminator.build(input_shape=(Batch_size, L_node, W_node, Channel * 1))

    # Build Generator
    generator = build_generator()
    generator.build(input_shape=(Batch_size, L_node, W_node, Channel * 2))
    generator.summary()

    cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    for epoch in range(Training_steps):
        for data, label in ds:
            with tf.GradientTape() as Tape:
                gen_input = label
                # gen_input = layers.concatenate([data, label], 3)
                gen_output = generator(gen_input, training=False)
                disc_input_real = layers.concatenate([data, label], 3)
                # disc_input_real = data
                disc_input_fake = layers.concatenate([gen_output, label], 3)
                # disc_input_fake = gen_output
                # if k != 0:
                #     print(1, disc_fake[0, ])
                disc_real = discriminator(disc_input_real, training=True)
                disc_fake = discriminator(disc_input_fake, training=True)
                # Label smoothing: real targets softened to 0.9 and fake
                # targets raised to 0.1 to stabilize discriminator training.
                d_loss_real = cross_entropy(tf.ones_like(disc_real) * 0.9, disc_real)
                d_loss_fake = cross_entropy(tf.zeros_like(disc_fake) + 0.1, disc_fake)
                d_loss = d_loss_real + d_loss_fake

            d_gradients = Tape.gradient(d_loss, discriminator.trainable_variables)
            d_optimizer.apply_gradients(zip(d_gradients, discriminator.trainable_variables))

            with tf.GradientTape() as Tape:
                gen_input = label
                # gen_input = layers.concatenate([data, label], 3)
                gen_output = generator(gen_input, training=True)
                disc_input_fake = layers.concatenate([gen_output, label], 3)
                # disc_input_fake = gen_output
                disc_fake = discriminator(disc_input_fake, training=False)
                # print(2, disc_fake[0, ])
                g_loss_entropy = Lambda_entropy * cross_entropy(tf.ones_like(disc_fake) * 0.9, disc_fake)
                g_loss_l1 = Lambda_l1 * tf.reduce_mean(tf.abs(gen_output - data))
                g_loss_ssim = Lambda_ssim * tf.reduce_mean(1 - tf.image.ssim(gen_output, data, max_val=1))
                # g_loss_l1_c1 = tf.reduce_mean(tf.abs(gen_output[:, :, :, 0] - data[:, :, :, 0]))
                # g_loss_l1_c2 = tf.reduce_mean(tf.abs(gen_output[:, :, :, 1] - data[:, :, :, 1]))
                # g_loss_l1_c3 = tf.reduce_mean(tf.abs(gen_output[:, :, :, 2] - data[:, :, :, 2]))
                # g_loss_l1 = Lambda * (g_loss_l1_c1 + g_loss_l1_c2 + g_loss_l1_c2 + g_loss_l1_c3)
                g_loss = g_loss_entropy + g_loss_l1 + g_loss_ssim

            g_gradients = Tape.gradient(g_loss, generator.trainable_variables)
            g_optimizer.apply_gradients(zip(g_gradients, generator.trainable_variables))

            if k % 500 == 0:
                # print("Step:{}, Generator Loss:{:.4f}, L1 Loss:{:.4f}, SSIM Loss:{:.4f}, Discriminator Loss:{:.4f}".format(k, g_loss, g_loss_l1 / Lambda, g_loss_ssim / Lambda1, d_loss))
                print("Step:{} Generator Loss:{:.4f} L1 Loss:{:.4f} SSIM Loss:{:.4f} Discriminator Loss:{:.4f}".format(k, g_loss, g_loss_l1 / Lambda_l1, g_loss_ssim / Lambda_ssim, d_loss))
                # print("Step:{} Generator Loss:{:.4f} SSIM Loss:{:.4f} Discriminator Loss:{:.4f}".format(l, g_loss, g_loss_ssim / Lambda_ssim, d_loss)
                output_save = np.reshape(gen_output[0], newshape=[L_node, W_node])
                cv2.imwrite(Output_dir + str(k) + '.jpg', output_save * 255.)

            if k % 2000 == 0:
                generator.save(Save_dir + str(k) + 'Gmodel' + '.h5')
                discriminator.save(Save_dir + str(k) + 'Dmodel' + '.h5')

            k = k + 1
Code Example #9
def unet_model_3d(input_shape,
                  pool_size=(2, 2, 2),
                  n_labels=1,
                  initial_learning_rate=0.00001,
                  deconvolution=False,
                  depth=4,
                  n_base_filters=32,
                  include_label_wise_dice_coefficients=False,
                  metrics=dice_coefficient,
                  batch_normalization=False,
                  activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.f
    :param metrics: List metrics to be calculated during model training (default is dice coefficient).
    :param include_label_wise_dice_coefficients: If True and n_labels is greater than 1, model will report the dice
    coefficient for each label as metric.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_channels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution (deconvolution) instead of up-sampling. This
    increases the amount of memory required during training.
    :return: Untrained 3D UNet Model
    """
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling
    for layer_depth in range(depth):
        layer1 = create_convolution_block(
            input_layer=current_layer,
            n_filters=n_base_filters * (2**layer_depth),
            batch_normalization=batch_normalization)
        layer2 = create_convolution_block(
            input_layer=layer1,
            n_filters=n_base_filters * (2**layer_depth) * 2,
            batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(
            pool_size=pool_size,
            deconvolution=deconvolution,
            n_filters=(current_layer.shape[1]))(current_layer)

        padded_up_convolution = pad_for_concatenation(
            up_convolution, levels[layer_depth][1].shape)

        concat = concatenate([padded_up_convolution, levels[layer_depth][1]],
                             axis=1)

        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1].shape[1],
            input_layer=concat,
            batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1].shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    if include_label_wise_dice_coefficients and n_labels > 1:
        label_wise_dice_metrics = [
            get_label_dice_coefficient_function(index)
            for index in range(n_labels)
        ]
        if metrics:
            metrics = metrics + label_wise_dice_metrics
        else:
            metrics = label_wise_dice_metrics

    model.compile(optimizer=Adam(lr=initial_learning_rate),
                  loss=dice_coefficient_loss,
                  metrics=metrics)
    return model
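create_convolution_block, get_up_convolution, and pad_for_concatenation are project-local helpers (note the channels-first layout implied by the axis=1 concatenations). A hedged sketch of what get_up_convolution might look like, matching the docstring's deconvolution-versus-upsampling switch:

from tensorflow.keras.layers import Conv3DTranspose, UpSampling3D

def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2),
                       strides=(2, 2, 2), deconvolution=False):
    # Transpose convolution when requested (costs more memory during
    # training), otherwise plain up-sampling.
    if deconvolution:
        return Conv3DTranspose(filters=n_filters, kernel_size=kernel_size,
                               strides=strides)
    return UpSampling3D(size=pool_size)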
Code Example #10
    def build_model(self, n_features):
        """
        The method builds a new member of the ensemble and returns it.
        """
        # derived parameters
        self.hyperparameters['n_members'] = self.hyperparameters[
            'n_segments'] * self.hyperparameters['n_members_segment']

        # initialize optimizer and early stopping
        self.optimizer = Adam(lr=self.hyperparameters['lr'],
                              beta_1=0.9,
                              beta_2=0.999,
                              epsilon=None,
                              decay=0.,
                              amsgrad=False)

        self.es = EarlyStopping(monitor=f'val_{self.loss_name}',
                                min_delta=0.0,
                                patience=self.hyperparameters['patience'],
                                verbose=1,
                                mode='min',
                                restore_best_weights=True)

        inputs = Input(shape=(n_features, ))
        h = GaussianNoise(self.hyperparameters['noise_in'],
                          name='noise_input')(inputs)

        for i in range(self.hyperparameters['layers']):
            h = Dense(self.hyperparameters['neurons'],
                      activation=self.hyperparameters['activation'],
                      kernel_regularizer=regularizers.l1_l2(
                          self.hyperparameters['l1_hidden'],
                          self.hyperparameters['l2_hidden']),
                      kernel_initializer='random_uniform',
                      bias_initializer='zeros',
                      name=f'hidden_{i}')(h)

            h = Dropout(self.hyperparameters['dropout'],
                        name=f'hidden_dropout_{i}')(h)

        mu = Dense(1,
                   activation='linear',
                   kernel_regularizer=regularizers.l1_l2(
                       self.hyperparameters['l1_mu'],
                       self.hyperparameters['l2_mu']),
                   kernel_initializer='random_uniform',
                   bias_initializer='zeros',
                   name='mu_output')(h)

        mu = GaussianNoise(self.hyperparameters['noise_mu'],
                           name='noise_mu')(mu)

        if self.hyperparameters['pdf'] == 'normal' or self.hyperparameters[
                'pdf'] == 'skewed':
            sigma = Dense(1,
                          activation='softplus',
                          kernel_regularizer=regularizers.l1_l2(
                              self.hyperparameters['l1_sigma'],
                              self.hyperparameters['l2_sigma']),
                          kernel_initializer='random_uniform',
                          bias_initializer='zeros',
                          name='sigma_output')(h)

            sigma = GaussianNoise(self.hyperparameters['noise_sigma'],
                                  name='noise_sigma')(sigma)

        if self.hyperparameters['pdf'] == 'skewed':
            alpha = Dense(1,
                          activation='linear',
                          kernel_regularizer=regularizers.l1_l2(
                              self.hyperparameters['l1_alpha'],
                              self.hyperparameters['l2_alpha']),
                          kernel_initializer='random_uniform',
                          bias_initializer='zeros',
                          name='alpha_output')(h)

            alpha = GaussianNoise(self.hyperparameters['noise_alpha'],
                                  name='noise_alpha')(alpha)

        if self.hyperparameters['pdf'] is None:
            outputs = mu
        elif self.hyperparameters['pdf'] == 'normal':
            outputs = concatenate([mu, sigma])
        elif self.hyperparameters['pdf'] == 'skewed':
            outputs = concatenate([mu, sigma, alpha])

        model = Model(inputs=inputs, outputs=outputs)
        return model
Code Example #11
File: models.py Project: torresxavier/cgm-ml
    def call(self, inputs):
        # Assumed context: K = tensorflow.keras.backend and
        # layers = tensorflow.keras.layers; shape asserts ignore the batch axis.

        point_cloud = inputs
        self.point_cloud_in = point_cloud
        assert_shape_is(point_cloud, (1024, 3))

        # First attention layer with one head.
        onehead_attention = self.onehead_attention(point_cloud)
        onehead_attention_features = onehead_attention[0]
        onehead_graph_features = onehead_attention[1]
        onehead_attention_coefficients = onehead_attention[2]
        self.onehead_attention_coefficients_out = onehead_attention_coefficients
        assert_shape_is(onehead_attention_features, (1024, 1, 16))
        assert_shape_is(onehead_graph_features, (1024, 20, 16))
        assert_shape_is(onehead_attention_coefficients, (1024, 1, 20))

        # Skip connection from point cloud to attention features.
        point_cloud_expanded = K.expand_dims(point_cloud, axis=2)
        assert_shape_is(point_cloud_expanded, (1024, 1, 3))
        onehead_attention_features = K.concatenate(
            [onehead_attention_features, point_cloud_expanded])
        assert_shape_is(onehead_attention_features, (1024, 1, 19))
        del point_cloud_expanded

        # Spatial transform.
        point_cloud_transformed = self.transform(
            [point_cloud, onehead_attention_features, onehead_graph_features])
        assert_shape_is(point_cloud_transformed, (1024, 3))
        self.point_cloud_transformed_out = point_cloud_transformed
        del point_cloud

        # Second attention layer with four heads.
        fourhead_attention = self.fourhead_attention(point_cloud_transformed)
        fourhead_attention_features = fourhead_attention[0]
        fourhead_graph_features = fourhead_attention[1]
        fourhead_attention_coefficients = fourhead_attention[2]
        self.fourhead_attention_coefficients_out = fourhead_attention_coefficients
        assert_shape_is(fourhead_attention_features, (1024, 1, 64))
        assert_shape_is(fourhead_graph_features, (1024, 20, 64))
        assert_shape_is(fourhead_attention_coefficients, (1024, 1, 80))

        # Skip connection from transformed point cloud to attention features.
        point_cloud_expanded = K.expand_dims(point_cloud_transformed, axis=2)
        assert_shape_is(point_cloud_expanded, (1024, 1, 3))
        fourhead_attention_features = K.concatenate(
            [fourhead_attention_features, point_cloud_expanded])
        assert_shape_is(fourhead_attention_features, (1024, 1, 67))

        # MLP 1 on attention features.
        net1 = self.mlp1(fourhead_attention_features)
        net1 = self.mlp_bn1(net1)
        net1 = self.mlp_activation1(net1)
        assert_shape_is(net1, (1024, 1, 64))

        # MLP 2 on attention features.
        net2 = self.mlp2(net1)
        net2 = self.mlp_bn2(net2)
        net2 = self.mlp_activation2(net2)
        assert_shape_is(net2, (1024, 1, 64))

        # MLP 3 on attention features.
        net3 = self.mlp3(net2)
        net3 = self.mlp_bn3(net3)
        net3 = self.mlp_activation3(net3)
        assert_shape_is(net3, (1024, 1, 64))

        # MLP 4 on attention features.
        net4 = self.mlp4(net3)
        net4 = self.mlp_bn4(net4)
        net4 = self.mlp_activation4(net4)
        assert_shape_is(net4, (1024, 1, 128))

        # Maximum for graph features.
        fourhead_graph_features_max = tf.reduce_max(fourhead_graph_features,
                                                    axis=2,
                                                    keepdims=True)
        assert_shape_is(fourhead_graph_features_max, (1024, 1, 64))

        # Concatenate all MLPs and maximum of graph features.
        net = layers.concatenate(
            [net1, net2, net3, net4, fourhead_graph_features_max])
        assert_shape_is(net, (1024, 1, 384))

        # MLP 5.
        net = self.mlp5(net)
        net = self.mlp_bn5(net)
        net = self.mlp_activation5(net)
        assert_shape_is(net, (1024, 1, 1024))

        # Maximum for net.
        net = K.max(net, axis=1, keepdims=True)
        assert_shape_is(net, (1, 1, 1024))

        # Flatten.
        net = self.flatten(net)
        assert_shape_is(net, (1024, ))

        # Dense 1.
        net = self.dense1(net)
        net = self.dense_dropout1(net)
        assert_shape_is(net, (512, ))

        # Dense 2.
        net = self.dense2(net)
        net = self.dense_dropout2(net)
        assert_shape_is(net, (256, ))

        # Dense 3.
        net = self.dense3(net)
        #assert_shape_is(net, (40,))

        return net
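assert_shape_is isn't shown; a hypothetical sketch consistent with how it is called above (the batch axis is excluded from the comparison):

def assert_shape_is(tensor, expected_shape):
    # Compare the tensor's shape, minus the batch dimension, to
    # expected_shape; static shapes are assumed.
    actual_shape = tuple(int(d) for d in tensor.shape[1:])
    assert actual_shape == tuple(expected_shape), \
        'expected shape %s, got %s' % (tuple(expected_shape), actual_shape)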
Code Example #12
# Continuation of an autoencoder definition: `encoded` and `AE_state` are
# built earlier in the file.
encoded = LeakyReLU(alpha=0.2, name="LeakyRelu2_NS")(encoded)
n_state = layers.Dense(state_dim, name="dense3_NS")(encoded)
AE = keras.Model(inputs=AE_state, outputs=n_state, name="AE")

#print(AE.summary())
#tf.keras.utils.plot_model(AE, to_file='AE_model_plot.png', show_shapes=True, show_layer_names=True)

opt_AE = tf.keras.optimizers.RMSprop(learning_rate=0.00015)
AE.compile(loss='mean_squared_error', optimizer=opt_AE, metrics=['mse'])

# This model maps an input & action to its next state
# Input state
curr_state = keras.Input(shape=(state_dim, ), name="curr_state")
curr_action = keras.Input(shape=(action_dim, ), name="curr_action")
# FDM model
curr_state_action = concatenate([curr_state, curr_action])
fdm_h1 = Dense(16, name="dense1_FDM")(curr_state_action)
fdm_h1 = LeakyReLU(alpha=0.2, name="LeakyRelu1_FDM")(fdm_h1)
fdm_h2 = Dense(16, name="dense2_FDM")(fdm_h1)
fdm_h2 = LeakyReLU(alpha=0.2, name="LeakyRelu2_FDM")(fdm_h2)
fdm_pred_state = layers.Dense(state_dim, name="dense3_FDM")(fdm_h2)
FDM = keras.Model(inputs=[curr_state, curr_action],
                  outputs=fdm_pred_state,
                  name="FDM")

#print(FDM.summary())
#tf.keras.utils.plot_model(FDM, to_file='FDM_model_plot.png', show_shapes=True, show_layer_names=True)

opt_FDM = tf.keras.optimizers.RMSprop(learning_rate=0.00015)
FDM.compile(loss='mean_squared_error', optimizer=opt_FDM, metrics=['mse'])
Code Example #13
def CNN(X_1, X_2, y, X_1_val, X_2_val, y_val, maxlen, vocab_size):
    # Assumed imports: tensorflow as tf, numpy as np;
    # from tensorflow.keras.layers import (Input, Embedding, Conv1D, MaxPool1D,
    #     Flatten, Dense, Dropout, concatenate);
    # from tensorflow.keras.models import Model;
    # from tensorflow.keras.initializers import RandomNormal;
    # from hyperopt import STATUS_OK.

    tf.keras.backend.clear_session()

    # Hyperparameter optimization via Hyperas

    lr = {{choice([1e-2, 1e-3, 1e-4, 1e-5])}}  # Learning rate
    epochs = {{choice([10, 20, 30])}}  # Number of epochs
    batch_size = {{choice([64, 128])}}  # Batch size
    emb_dim = {{choice([32, 64])}}  # Embedding dimension
    num_cnn = {{choice([4, 6, 8])}}  # Number of CNN layers
    inter_pool = {{choice([0, 1, 3, 5])}}  # Pooling interval: MaxPool after every inter_pool conv layers (0 disables)
    num_inner_dnn = {{choice([2, 3])}}  # Number of inner DNN layers
    num_outer_dnn = {{choice([1, 2])}}  # Number of outer DNN layers

    print('Build model...')
    initializer = RandomNormal(mean=0.0, stddev=0.05, seed=None)

    # Creating inner model

    input_aux = Input(shape=(maxlen, ))

    emb = Embedding(input_dim=vocab_size,
                    output_dim=emb_dim,
                    input_length=maxlen)

    x = emb(input_aux)

    filters = []
    kernel_size = []
    pool_size = []

    input_length = maxlen
    works = True

    for i in range(num_cnn):
        filter_choice = {{choice([32, 64])}}
        kernel_choice = {{choice([5, 7, 9])}}

        while input_length - kernel_choice + 1 <= 0:
            if kernel_choice >= 5:
                kernel_choice -= 2
            else:
                works = False
                break

        if not works:
            break

        input_length = input_length - kernel_choice + 1
        print('input_length:' + str(input_length))
        print('kernel_size:' + str(kernel_choice))
        filters.append(filter_choice)
        kernel_size.append(kernel_choice)

        x = Conv1D(filters=filters[-1],
                   kernel_size=kernel_size[-1],
                   strides=1,
                   padding='valid',
                   kernel_initializer=initializer,
                   activation='tanh')(x)

        if i > 0 and inter_pool > 0 and (i + 1) % inter_pool == 0:
            print('input_length pool bfr:' + str(input_length))
            input_length = int(input_length / 3)

            if input_length <= 0:
                break
            x = MaxPool1D(pool_size=3, padding='valid')(x)
            print('input_length pool aftr:' + str(input_length))
    x = Flatten()(x)
    for _ in range(num_inner_dnn):
        x = Dense({{choice([32, 64])}}, activation='relu')(x)
        x = Dropout(0.5)(x)
    output_aux = x

    model_aux = Model(inputs=input_aux, outputs=output_aux)

    # Creating outer model

    input_1 = Input(shape=(maxlen, ))
    input_2 = Input(shape=(maxlen, ))

    x_1 = model_aux(input_1)
    x_2 = model_aux(input_2)
    x = concatenate([x_1, x_2])

    for _ in range(num_outer_dnn):
        x = Dense({{choice([32, 64])}}, activation='relu')(x)
    output = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=[input_1, input_2], outputs=output)

    model.compile(optimizer=tf.keras.optimizers.Adam(lr=lr),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    result = model.fit([X_1, X_2],
                       y,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=2,
                       validation_data=([X_1_val, X_2_val], y_val))

    # On newer Keras versions this history key is 'val_accuracy'.
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
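The {{choice(...)}} placeholders are Hyperas template syntax, so the function is compiled by hyperas rather than called directly. A hedged sketch of the usual driver (the `data` function is user-supplied and must return the eight arguments of CNN in order):

from hyperas import optim
from hyperopt import Trials, tpe

best_run, best_model = optim.minimize(model=CNN,
                                      data=data,      # user-supplied function
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())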
Code Example #14
    def __init__(self,
                 inshape=None,
                 input_model=None,
                 nb_features=None,
                 nb_levels=None,
                 max_pool=2,
                 feat_mult=1,
                 nb_conv_per_level=1,
                 do_res=False,
                 half_res=False,
                 name=None):
        """
        Parameters:
            inshape: Optional input tensor shape (including features). e.g. (192, 192, 192, 2).
            input_model: Optional input model that feeds directly into the unet before concatenation.
            nb_features: Unet convolutional features. Can be specified via a list of lists with
                the form [[encoder feats], [decoder feats]], or as a single integer. If None (default),
                the unet features are defined by the default config described in the class documentation.
            nb_levels: Number of levels in unet. Only used when nb_features is an integer. Default is None.
            max_pool: Pooling size per level, an integer or a per-level list. Default is 2.
            feat_mult: Per-level feature multiplier. Only used when nb_features is an integer. Default is 1.
            nb_conv_per_level: Number of convolutions per unet level. Default is 1.
            do_res: Use residual connections inside the convolution blocks. Default is False.
            half_res: Skip the last decoder upsampling. Default is False.
            name: Optional model name. Default is None.
        """

        # save model name
        model_name = name

        # have the option of specifying input shape or input model
        if input_model is None:
            if inshape is None:
                raise ValueError(
                    'inshape must be supplied if input_model is None')
            unet_input = KL.Input(shape=inshape, name='unet_input')
            model_inputs = [unet_input]
        else:
            unet_input = KL.concatenate(input_model.outputs,
                                        name='unet_input_concat')
            model_inputs = input_model.inputs

        # default encoder and decoder layer features if nothing provided
        if nb_features is None:
            nb_features = default_unet_features()

        # build feature list automatically
        if isinstance(nb_features, int):
            if nb_levels is None:
                raise ValueError(
                    'must provide unet nb_levels if nb_features is an integer')
            feats = np.round(nb_features *
                             feat_mult**np.arange(nb_levels)).astype(int)
            nb_features = [
                np.repeat(feats[:-1], nb_conv_per_level),
                np.repeat(np.flip(feats), nb_conv_per_level)
            ]
        elif nb_levels is not None:
            raise ValueError(
                'cannot use nb_levels if nb_features is not an integer')

        ndims = len(unet_input.get_shape()) - 2
        assert ndims in (
            1, 2, 3), 'ndims should be one of 1, 2, or 3. found: %d' % ndims
        MaxPooling = getattr(KL, 'MaxPooling%dD' % ndims)

        # extract any surplus (full resolution) decoder convolutions
        enc_nf, dec_nf = nb_features
        nb_dec_convs = len(enc_nf)
        final_convs = dec_nf[nb_dec_convs:]
        dec_nf = dec_nf[:nb_dec_convs]
        nb_levels = int(nb_dec_convs / nb_conv_per_level) + 1

        if isinstance(max_pool, int):
            max_pool = [max_pool] * nb_levels

        # configure encoder (down-sampling path)
        enc_layers = []
        last = unet_input
        for level in range(nb_levels - 1):
            for conv in range(nb_conv_per_level):
                nf = enc_nf[level * nb_conv_per_level + conv]
                name = 'unet_enc_conv_%d_%d' % (level, conv)
                last = _conv_block(last, nf, name=name, do_res=do_res)
            enc_layers.append(last)

            # temporarily use maxpool since downsampling doesn't exist in keras
            last = MaxPooling(max_pool[level],
                              name='unet_enc_pooling_%d' % level)(last)

        # configure decoder (up-sampling path)
        for level in range(nb_levels - 1):
            real_level = nb_levels - level - 2
            for conv in range(nb_conv_per_level):
                nf = dec_nf[level * nb_conv_per_level + conv]
                name = 'unet_dec_conv_%d_%d' % (real_level, conv)
                last = _conv_block(last, nf, name=name, do_res=do_res)
            if not half_res or level < (nb_levels - 2):
                name = 'unet_dec_upsample_' + str(real_level)
                last = _upsample_block(last,
                                       enc_layers.pop(),
                                       factor=max_pool[real_level],
                                       name=name)

        # now we take care of any remaining convolutions
        for num, nf in enumerate(final_convs):
            name = 'unet_dec_final_conv_' + str(num)
            last = _conv_block(last, nf, name=name)

        super().__init__(inputs=model_inputs, outputs=last, name=model_name)
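_conv_block and _upsample_block are module-level helpers not shown here. A hypothetical sketch of _conv_block consistent with its call sites, assuming KL is keras.layers as in the class body:

def _conv_block(x, nfeat, strides=1, name=None, do_res=False):
    # One dimensionality-agnostic 3x3 convolution followed by LeakyReLU.
    ndims = len(x.get_shape()) - 2
    Conv = getattr(KL, 'Conv%dD' % ndims)
    out = Conv(nfeat, kernel_size=3, padding='same',
               strides=strides, name=name)(x)
    out = KL.LeakyReLU(0.2)(out)
    if do_res:
        # Residual connection; assumes the channel counts already match.
        out = KL.add([out, x])
    return out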
Code Example #15
    def __init__(self,
                 inshape,
                 nb_labels,
                 nb_unet_features=None,
                 init_mu=None,
                 init_sigma=None,
                 warp_atlas=True,
                 stat_post_warp=True,
                 stat_nb_feats=16,
                 network_stat_weight=0.001,
                 **kwargs):
        """ 
        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_labels: Number of labels in probabilistic atlas.
            nb_unet_features: Unet convolutional features. See VxmDense documentation for more information.
            init_mu: Optional initialization for gaussian means. Default is None.
            init_sigma: Optional initialization for gaussian sigmas. Default is None.
            warp_atlas: Whether to warp the atlas to the image before computing stats. Default is True.
            stat_post_warp: Computes gaussian stats using the warped atlas. Default is True.
            stat_nb_feats: Number of features in the stats convolutional layer. Default is 16.
            network_stat_weight: Relative weight of the stats learned by the network. Default is 0.001.
            kwargs: Forwarded to the internal VxmDense model.
        """

        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [
            1, 2, 3
        ], 'ndims should be one of 1, 2, or 3. found: %d' % ndims

        # build warp network
        vxm_model = VxmDense(inshape,
                             nb_unet_features=nb_unet_features,
                             src_feats=nb_labels,
                             **kwargs)

        # extract necessary layers from the network
        # important to note that we're warping the atlas to the image in this case and
        # we'll swap the input order later
        atlas, image = vxm_model.inputs
        warped_atlas = vxm_model.references.y_source if warp_atlas else atlas
        flow = vxm_model.references.pos_flow

        # compute stat using the warped atlas (or not)
        if stat_post_warp:
            assert warp_atlas, 'must enable warp_atlas if computing stat post warp'
            combined = KL.concatenate([warped_atlas, image],
                                      name='post_warp_concat')
        else:
            # use last convolution in the unet before the flow convolution
            combined = vxm_model.references.unet_model.layers[-2].output

        # convolve into nlabel-stat volume
        conv = _conv_block(combined, stat_nb_feats)
        conv = _conv_block(conv, nb_labels)

        Conv = getattr(KL, 'Conv%dD' % ndims)
        weaknorm = KI.RandomNormal(mean=0.0, stddev=1e-5)

        # convolve into mu and sigma volumes
        stat_mu_vol = Conv(nb_labels,
                           kernel_size=3,
                           name='mu_vol',
                           kernel_initializer=weaknorm,
                           bias_initializer=weaknorm)(conv)
        stat_logssq_vol = Conv(nb_labels,
                               kernel_size=3,
                               name='logsigmasq_vol',
                               kernel_initializer=weaknorm,
                               bias_initializer=weaknorm)(conv)

        # pool to get 'final' stat (pooling is hardcoded to 3D here even
        # though ndims is allowed to be 1, 2, or 3 above)
        stat_mu = KL.GlobalMaxPooling3D(name='mu_pooling')(stat_mu_vol)
        stat_logssq = KL.GlobalMaxPooling3D(
            name='logssq_pooling')(stat_logssq_vol)

        # combine mu with initialization
        if init_mu is not None:
            init_mu = np.array(init_mu)
            stat_mu = KL.Lambda(lambda x: network_stat_weight * x + init_mu,
                                name='comb_mu')(stat_mu)

        # combine sigma with initialization
        if init_sigma is not None:
            init_logsigmasq = np.array([2 * np.log(f) for f in init_sigma])
            stat_logssq = KL.Lambda(
                lambda x: network_stat_weight * x + init_logsigmasq,
                name='comb_sigma')(stat_logssq)

        # unnorm loglike
        # NOTE: tf.distributions.Normal and tf.log (below) are TF1-era APIs;
        # on TF2 the equivalents are tfp.distributions.Normal and tf.math.log.
        def unnorm_loglike(I, mu, logsigmasq, use_log=True):
            P = tf.distributions.Normal(mu, K.exp(logsigmasq / 2))
            return P.log_prob(I) if use_log else P.prob(I)

        uloglhood = KL.Lambda(lambda x: unnorm_loglike(*x),
                              name='unsup_likelihood')(
                                  [image, stat_mu, stat_logssq])

        # compute data loss as a layer, because it's a bit easier than outputting a ton of things
        def logsum(prob_ll, atl):
            # safe computation using the log sum exp trick (NOTE: this does not normalize p)
            # https://www.xarg.org/2016/06/the-log-sum-exp-trick-in-machine-learning
            logpdf = prob_ll + K.log(atl + K.epsilon())
            alpha = tf.reduce_max(logpdf, -1, keepdims=True)
            return alpha + tf.log(
                tf.reduce_sum(K.exp(logpdf - alpha), -1, keepdims=True) +
                K.epsilon())

        loss_vol = KL.Lambda(lambda x: logsum(*x))([uloglhood, warped_atlas])

        # initialize the keras model
        super().__init__(inputs=[image, atlas], outputs=[loss_vol, flow])

        # cache pointers to layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.vxm_model = vxm_model
        self.references.uloglhood = uloglhood
        self.references.stat_mu = stat_mu
        self.references.stat_logssq = stat_logssq
Code Example #16
    def __init__(self,
                 inshape,
                 nb_unet_features=None,
                 nb_unet_levels=None,
                 unet_feat_mult=1,
                 nb_unet_conv_per_level=1,
                 int_steps=7,
                 int_downsize=2,
                 bidir=False,
                 use_probs=False,
                 src_feats=1,
                 trg_feats=1,
                 unet_half_res=False,
                 input_model=None):
        """ 
        Parameters:
            inshape: Input shape. e.g. (192, 192, 192)
            nb_unet_features: Unet convolutional features. Can be specified via a list of lists with
                the form [[encoder feats], [decoder feats]], or as a single integer. If None (default),
                the unet features are defined by the default config described in the unet class documentation.
            nb_unet_levels: Number of levels in unet. Only used when nb_unet_features is an integer. Default is None.
            unet_feat_mult: Per-level feature multiplier. Only used when nb_unet_features is an integer. Default is 1.
            nb_unet_conv_per_level: Number of convolutions per unet level. Default is 1.
            int_steps: Number of flow integration steps. The warp is non-diffeomorphic when this value is 0.
            int_downsize: Integer specifying the flow downsample factor for vector integration. The flow field
                is not downsampled when this value is 1.
            bidir: Enable bidirectional cost function. Default is False.
            use_probs: Use probabilities in flow field. Default is False.
            src_feats: Number of source image features. Default is 1.
            trg_feats: Number of target image features. Default is 1.
            unet_half_res: Skip the last unet decoder upsampling. Requires that int_downsize=2. Default is False.
            input_model: Model to replace default input layer before concatenation. Default is None.
        """

        # ensure correct dimensionality
        ndims = len(inshape)
        assert ndims in [1, 2, 3], \
            'ndims should be one of 1, 2, or 3. found: %d' % ndims

        if input_model is None:
            # configure default input layers if an input model is not provided
            source = tf.keras.Input(shape=(*inshape, src_feats),
                                    name='source_input')
            target = tf.keras.Input(shape=(*inshape, trg_feats),
                                    name='target_input')
            input_model = tf.keras.Model(inputs=[source, target],
                                         outputs=[source, target])
        else:
            source, target = input_model.outputs[:2]

        # build core unet model and grab inputs
        unet_model = Unet(input_model=input_model,
                          nb_features=nb_unet_features,
                          nb_levels=nb_unet_levels,
                          feat_mult=unet_feat_mult,
                          nb_conv_per_level=nb_unet_conv_per_level,
                          half_res=unet_half_res)

        # transform unet output into a flow field
        Conv = getattr(KL, 'Conv%dD' % ndims)
        flow_mean = Conv(ndims,
                         kernel_size=3,
                         padding='same',
                         kernel_initializer=KI.RandomNormal(mean=0.0,
                                                            stddev=1e-5),
                         name='flow')(unet_model.output)

        # optionally include probabilities
        if use_probs:
            # initialize the velocity variance very low, to start stable
            flow_logsigma = Conv(ndims,
                                 kernel_size=3,
                                 padding='same',
                                 kernel_initializer=KI.RandomNormal(
                                     mean=0.0, stddev=1e-10),
                                 bias_initializer=KI.Constant(value=-10),
                                 name='log_sigma')(unet_model.output)
            flow_params = KL.concatenate([flow_mean, flow_logsigma],
                                         name='prob_concat')
            flow = ne.layers.SampleNormalLogVar(name="z_sample")(
                [flow_mean, flow_logsigma])
        else:
            flow_params = flow_mean
            flow = flow_mean

        if not unet_half_res:
            # optionally resize for integration
            if int_steps > 0 and int_downsize > 1:
                flow = layers.RescaleTransform(1 / int_downsize,
                                               name='flow_resize')(flow)

        preint_flow = flow

        # optionally negate flow for bidirectional model
        pos_flow = flow
        if bidir:
            neg_flow = ne.layers.Negate(name='neg_flow')(flow)

        # integrate to produce diffeomorphic warp (i.e. treat flow as a stationary velocity field)
        if int_steps > 0:
            pos_flow = layers.VecInt(method='ss',
                                     name='flow_int',
                                     int_steps=int_steps)(pos_flow)
            if bidir:
                neg_flow = layers.VecInt(method='ss',
                                         name='neg_flow_int',
                                         int_steps=int_steps)(neg_flow)

            # resize to final resolution
            if int_downsize > 1:
                pos_flow = layers.RescaleTransform(int_downsize,
                                                   name='diffflow')(pos_flow)
                if bidir:
                    neg_flow = layers.RescaleTransform(
                        int_downsize, name='neg_diffflow')(neg_flow)

        # warp image with flow field
        y_source = layers.SpatialTransformer(interp_method='linear',
                                             indexing='ij',
                                             name='transformer')(
                                                 [source, pos_flow])
        if bidir:
            y_target = layers.SpatialTransformer(interp_method='linear',
                                                 indexing='ij',
                                                 name='neg_transformer')(
                                                     [target, neg_flow])

        # initialize the keras model
        outputs = [y_source, y_target] if bidir else [y_source]

        if use_probs:
            # compute loss on flow probabilities
            outputs += [flow_params]
        else:
            # compute smoothness loss on pre-integrated warp
            outputs += [preint_flow]

        super().__init__(name='vxm_dense',
                         inputs=input_model.inputs,
                         outputs=outputs)

        # cache pointers to layers and tensors for future reference
        self.references = LoadableModel.ReferenceContainer()
        self.references.unet_model = unet_model
        self.references.y_source = y_source
        self.references.y_target = y_target if bidir else None
        self.references.pos_flow = pos_flow
        self.references.neg_flow = neg_flow if bidir else None
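
The name='vxm_dense' above indicates this is VoxelMorph's VxmDense constructor. A minimal instantiation sketch, assuming the class is exposed as vxm.networks.VxmDense (shapes and feature counts are illustrative only):

import voxelmorph as vxm  # assumption: the voxelmorph package exposes this class

# 3D registration model; outputs are [warped source image, pre-integration flow].
model = vxm.networks.VxmDense(
    inshape=(160, 192, 224),
    nb_unet_features=[[16, 32, 32, 32], [32, 32, 32, 32, 32, 16, 16]],
    int_steps=7,
    int_downsize=2)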
Code Example #17
    def create_model(self, categorical_features, embedding_dim, lstm_neurons,
                     dense_neurons, **kwargs):
        num_steps = 300

        # initialize array that the model expects as an input
        user_inputs = []

        categorical_layers = []
        predict_layers = []
        # categorical features layers:
        for category in categorical_features:
            cat_input = Input(shape=(num_steps, 1), name=f'{category}_input')
            user_inputs.append(cat_input)
            embedding = Embedding(input_dim=2,
                                  output_dim=embedding_dim,
                                  name=f'{category}_embedding')(cat_input)
            embedding = Lambda(lambda y: tf.squeeze(y, 2))(embedding)
            categorical_layers.append(embedding)

        categorical_vector = concatenate(categorical_layers)
        dense_cat = Dense(20, activation='relu')(categorical_vector)

        input_since_last = Input(shape=(num_steps, 1), name='input_since_last')
        user_inputs.append(input_since_last)

        context_cat_vector = concatenate([dense_cat, input_since_last])
        context_cat_lstm = LSTM(20,
                                return_sequences=True,
                                name='context_cat_lstm')(context_cat_vector)
        predict_layers.append(context_cat_lstm)

        input_temporal = Input(shape=(num_steps, 2), name='temporal_input')
        user_inputs.append(input_temporal)
        lstm_temporal = LSTM(lstm_neurons,
                             return_sequences=True,
                             name='lstm_temporal')(input_temporal)
        predict_layers.append(lstm_temporal)

        input_temporal_prev = Input(shape=(num_steps, 3),
                                    name='temporal_input_prev')
        user_inputs.append(input_temporal_prev)
        prev_lstm_temporal = LSTM(
            lstm_neurons, return_sequences=True,
            name='lstm_prev_temporal')(input_temporal_prev)
        predict_layers.append(prev_lstm_temporal)

        predict_vector = concatenate(predict_layers)

        lstm_out1 = LSTM(lstm_neurons, return_sequences=True,
                         name='lstm_out1')(predict_vector)
        lstm_result = LSTM(lstm_neurons,
                           return_sequences=True,
                           name='lstm_result')(lstm_out1)
        output = Dense(1, name='output')(lstm_result)

        model = keras.Model(inputs=user_inputs, outputs=[output])
        model.compile(optimizer='adam', loss='mse', metrics=['mae'])

        model.summary()
        return model
Code Example #18
def create_model(img_height=912, img_width=912):
    nb_filter = [32, 64, 128, 256, 512]
    bn_axis = 3
    num_class = 1

    inputs = Input(shape=(img_height, img_width, 3), name='main_input')

    conv1_1 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(inputs)
    conv1_1 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_1)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), name='pool1')(conv1_1)

    conv2_1 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(pool1)
    conv2_1 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_1)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), name='pool2')(conv2_1)

    up1_2 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up12',
                            padding='same')(conv2_1)
    conv1_2 = concatenate([up1_2, conv1_1], name='merge12', axis=bn_axis)
    conv1_2 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_2)
    conv1_2 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_2)

    conv3_1 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(pool2)
    conv3_1 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(conv3_1)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), name='pool3')(conv3_1)

    up2_2 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up22',
                            padding='same')(conv3_1)
    conv2_2 = concatenate([up2_2, conv2_1], name='merge22', axis=bn_axis)
    conv2_2 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_2)
    conv2_2 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_2)

    up1_3 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up13',
                            padding='same')(conv2_2)
    conv1_3 = concatenate([up1_3, conv1_1, conv1_2],
                          name='merge13',
                          axis=bn_axis)
    conv1_3 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_3)
    conv1_3 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_3)

    conv4_1 = Conv2D(nb_filter[3], (3, 3), activation='relu',
                     padding='same')(pool3)
    conv4_1 = Conv2D(nb_filter[3], (3, 3), activation='relu',
                     padding='same')(conv4_1)
    pool4 = MaxPooling2D((2, 2), strides=(2, 2), name='pool4')(conv4_1)

    up3_2 = Conv2DTranspose(nb_filter[2], (2, 2),
                            strides=(2, 2),
                            name='up32',
                            padding='same')(conv4_1)
    conv3_2 = concatenate([up3_2, conv3_1], name='merge32', axis=bn_axis)
    conv3_2 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(conv3_2)
    conv3_2 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(conv3_2)

    up2_3 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up23',
                            padding='same')(conv3_2)
    conv2_3 = concatenate([up2_3, conv2_1, conv2_2],
                          name='merge23',
                          axis=bn_axis)
    conv2_3 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_3)
    conv2_3 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_3)

    up1_4 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up14',
                            padding='same')(conv2_3)
    conv1_4 = concatenate([up1_4, conv1_1, conv1_2, conv1_3],
                          name='merge14',
                          axis=bn_axis)
    conv1_4 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_4)
    conv1_4 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_4)

    #     conv5_1 = standard_unit(pool4, stage='51', nb_filter=nb_filter[4])
    conv5_1 = Conv2D(nb_filter[4], (3, 3), activation='relu',
                     padding='same')(pool4)
    conv5_1 = Conv2D(nb_filter[4], (3, 3), activation='relu',
                     padding='same')(conv5_1)

    up4_2 = Conv2DTranspose(nb_filter[3], (2, 2),
                            strides=(2, 2),
                            name='up42',
                            padding='same')(conv5_1)
    conv4_2 = concatenate([up4_2, conv4_1], name='merge42', axis=bn_axis)
    conv4_2 = Conv2D(nb_filter[3], (3, 3), activation='relu',
                     padding='same')(conv4_2)
    conv4_2 = Conv2D(nb_filter[3], (3, 3), activation='relu',
                     padding='same')(conv4_2)

    up3_3 = Conv2DTranspose(nb_filter[2], (2, 2),
                            strides=(2, 2),
                            name='up33',
                            padding='same')(conv4_2)
    conv3_3 = concatenate([up3_3, conv3_1, conv3_2],
                          name='merge33',
                          axis=bn_axis)
    conv3_3 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(conv3_3)
    conv3_3 = Conv2D(nb_filter[2], (3, 3), activation='relu',
                     padding='same')(conv3_3)

    up2_4 = Conv2DTranspose(nb_filter[1], (2, 2),
                            strides=(2, 2),
                            name='up24',
                            padding='same')(conv3_3)
    conv2_4 = concatenate([up2_4, conv2_1, conv2_2, conv2_3],
                          name='merge24',
                          axis=bn_axis)
    conv2_4 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_4)
    conv2_4 = Conv2D(nb_filter[1], (3, 3), activation='relu',
                     padding='same')(conv2_4)

    up1_5 = Conv2DTranspose(nb_filter[0], (2, 2),
                            strides=(2, 2),
                            name='up15',
                            padding='same')(conv2_4)
    conv1_5 = concatenate([up1_5, conv1_1, conv1_2, conv1_3, conv1_4],
                          name='merge15',
                          axis=bn_axis)
    conv1_5 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_5)
    conv1_5 = Conv2D(nb_filter[0], (3, 3), activation='relu',
                     padding='same')(conv1_5)

    nestnet_output_1 = Conv2D(num_class, (1, 1),
                              activation='sigmoid',
                              name='output_1',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_2)
    nestnet_output_2 = Conv2D(num_class, (1, 1),
                              activation='sigmoid',
                              name='output_2',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_3)
    nestnet_output_3 = Conv2D(num_class, (1, 1),
                              activation='sigmoid',
                              name='output_3',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_4)
    nestnet_output_4 = Conv2D(num_class, (1, 1),
                              activation='sigmoid',
                              name='output_4',
                              kernel_initializer='he_normal',
                              padding='same',
                              kernel_regularizer=l2(1e-4))(conv1_5)

    model = Model(inputs, nestnet_output_4)

    return model
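
Only nestnet_output_4 feeds the returned model; outputs 1 through 3 are built but discarded. Under the deep-supervision scheme of UNet++, all four side outputs would be trained jointly, i.e. the Model call above would instead read:

    # Deep-supervision variant (sketch): the loss is applied to all four side outputs.
    model = Model(inputs, [nestnet_output_1, nestnet_output_2,
                           nestnet_output_3, nestnet_output_4])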
Code Example #19
def build_generator():
    # Encoder:
    inp = tf.keras.Input(shape=[L_node, W_node, Channel * 1])
    # input: (None, 256, 256, 2)
    e1 = layers.Conv2D(64, 5, 2, 'same')(inp)
    # e1: (None, 128, 128, 64)
    e2 = layers.LeakyReLU(0.2)(e1)
    e2 = layers.Conv2D(64, 3, 1, 'same', activation='relu')(e2)
    e2 = layers.Conv2D(128, 5, 2, 'same')(e2)  # Downsampling
    e2 = layers.BatchNormalization()(e2)
    # e2: (None, 64, 64, 128)
    e3 = layers.LeakyReLU(0.2)(e2)
    e3 = layers.Conv2D(128, 3, 1, 'same', activation='relu')(e3)
    e3 = layers.Conv2D(256, 5, 2, 'same')(e3)  # Downsampling
    e3 = layers.BatchNormalization()(e3)
    # e3: (None, 32, 32, 256)
    e4 = layers.LeakyReLU(0.2)(e3)
    e4 = layers.Conv2D(256, 3, 1, 'same', activation='relu')(e4)
    e4 = layers.Conv2D(512, 5, 2, 'same')(e4)  # Downsampling
    e4 = layers.BatchNormalization()(e4)
    # e4: (None, 16, 16, 512)
    e5 = layers.LeakyReLU(0.2)(e4)
    e5 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(e5)
    e5 = layers.Conv2D(512, 5, 2, 'same')(e5)  # Downsampling
    e5 = layers.BatchNormalization()(e5)
    # e5: (None, 8, 8, 512)
    e6 = layers.LeakyReLU(0.2)(e5)
    e6 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(e6)
    e6 = layers.Conv2D(512, 5, 2, 'same')(e6)  # Downsampling
    e6 = layers.BatchNormalization()(e6)
    # e6: (None, 4, 4, 512)
    e7 = layers.LeakyReLU(0.2)(e6)
    e7 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(e7)
    e7 = layers.Conv2D(512, 5, 2, 'same')(e7)  # Downsampling
    e7 = layers.BatchNormalization()(e7)
    # e7: (None, 2, 2, 512)
    e8 = layers.LeakyReLU(0.2)(e7)
    e8 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(e8)
    e8 = layers.Conv2D(512, 5, 2, 'same')(e8)  # Downsampling
    e8 = layers.BatchNormalization()(e8)
    # e8: (None, 1, 1, 512)
    # Decoder:
    d1 = layers.Activation('relu')(e8)
    d1 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(d1)
    d1 = layers.Conv2DTranspose(512, 5, 2, 'same')(d1)  # Upsampling
    d1 = layers.BatchNormalization()(d1)
    d1 = layers.Dropout(0.5)(d1)
    d1 = layers.concatenate([d1, e7], 3)
    # d1: (None, 2, 2, 512*2)
    d2 = layers.Activation('relu')(d1)
    d2 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(d2)
    d2 = layers.Conv2DTranspose(512, 5, 2, 'same')(d2)  # Upsampling
    d2 = layers.BatchNormalization()(d2)
    d2 = layers.Dropout(0.5)(d2)
    d2 = layers.concatenate([d2, e6], 3)
    # d2: (None, 4, 4, 512*2)
    d3 = layers.Activation('relu')(d2)
    d3 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(d3)
    d3 = layers.Conv2DTranspose(512, 5, 2, 'same')(d3)  # Upsampling
    d3 = layers.BatchNormalization()(d3)
    d3 = layers.Dropout(0.5)(d3)
    d3 = layers.concatenate([d3, e5], 3)
    # d3: (None, 8, 8, 512*2)
    d4 = layers.Activation('relu')(d3)
    d4 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(d4)
    d4 = layers.Conv2DTranspose(512, 5, 2, 'same')(d4)  # Upsampling
    d4 = layers.BatchNormalization()(d4)
    d4 = layers.Dropout(0.5)(d4)
    d4 = layers.concatenate([d4, e4], 3)
    # d4: (None, 16, 16, 512*2)
    d5 = layers.Activation('relu')(d4)
    d5 = layers.Conv2D(512, 3, 1, 'same', activation='relu')(d5)
    d5 = layers.Conv2DTranspose(256, 5, 2, 'same')(d5)  # Upsampling
    d5 = layers.BatchNormalization()(d5)
    d5 = layers.Dropout(0.5)(d5)
    d5 = layers.concatenate([d5, e3], 3)
    # d5: (None, 32, 32, 256*2)
    d6 = layers.Activation('relu')(d5)
    d6 = layers.Conv2D(256, 3, 1, 'same', activation='relu')(d6)
    d6 = layers.Conv2DTranspose(128, 5, 2, 'same')(d6)  # Upsampling
    d6 = layers.BatchNormalization()(d6)
    d6 = layers.Dropout(0.5)(d6)
    d6 = layers.concatenate([d6, e2], 3)
    # d6: (None, 64, 64, 128*2)
    d7 = layers.Activation('relu')(d6)
    d7 = layers.Conv2D(128, 3, 1, 'same', activation='relu')(d7)
    d7 = layers.Conv2DTranspose(64, 5, 2, 'same')(d7)  # Upsampling
    d7 = layers.BatchNormalization()(d7)
    d7 = layers.Dropout(0.5)(d7)
    d7 = layers.concatenate([d7, e1], 3)
    # d7: (None, 128, 128, 64*2)
    d8 = layers.Activation('relu')(d7)
    d8 = layers.Conv2D(64, 3, 1, 'same', activation='relu')(d8)
    d8 = layers.Conv2DTranspose(1, 5, 2, 'same')(d8)  # Upsampling
    d8 = layers.Activation('tanh')(d8)
    # d8: (None, 256, 256, 1)
    output = d8
    model = tf.keras.Model(inp, output)
    return model
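
build_generator reads the module-level globals L_node, W_node and Channel; the shape comments imply a 256x256 input with two channels. A usage sketch under that assumption:

# Hypothetical globals inferred from the shape comments above.
L_node, W_node, Channel = 256, 256, 2
generator = build_generator()
generator.summary()  # maps (None, 256, 256, 2) to (None, 256, 256, 1)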
Code Example #20
    dtype='int32')

embedding_layer = Embedding(
    MAX_NUM_WORDS, NUM_EMBEDDING_DIM)
top_embedded_wd = embedding_layer(
    top_input_wd)
bm_embedded_wd = embedding_layer(
    bm_input_wd)

source_lstm_wd = Bidirectional(LSTM(NUM_LSTM_UNITS, return_sequences=True,
                                    recurrent_dropout=0.3))
shared_lstm_wd = Bidirectional(LSTM(NUM_LSTM_UNITS, activation='tanh',
                                    recurrent_dropout=0.3))
top_source_wd = source_lstm_wd(top_embedded_wd)
bm_source_wd = source_lstm_wd(bm_embedded_wd)

source_comb_wd = concatenate(
    [top_source_wd, bm_source_wd],
    axis=-1
    )
lstm_ops_wd = shared_lstm_wd(source_comb_wd)   # 300D vector


top_input_bt = Input(
    shape=(768, ), 
    dtype='float32')
bm_input_bt = Input(
    shape=(768, ), 
    dtype='float32')


top_embedded_bt = Reshape((1, 768, ))(top_input_bt)
bm_embedded_bt = Reshape((1, 768, ))(bm_input_bt)
Code Example #21
File: component_nn.py Project: rock420/bugbug
    def model_creator(self, X, y):
        short_desc_inp = Input(shape=(self.short_desc_maxlen, ),
                               name="title_sequence")
        short_desc_emb = Embedding(self.short_desc_vocab_size,
                                   self.short_desc_emb_sz)(short_desc_inp)
        short_desc_emb = SpatialDropout1D(
            self.short_desc_emb_dropout_rate)(short_desc_emb)
        short_desc_encoded = Bidirectional(
            GRU(
                self.short_desc_encoded_gru_units,
                dropout=self.short_desc_encoded_gru_dropout,
                recurrent_dropout=self.short_desc_encoded_recurrent_dropout,
                return_sequences=True,
            ))(short_desc_emb)
        short_desc_encoded = GlobalMaxPooling1D()(short_desc_encoded)

        long_desc_inp = Input(shape=(self.long_desc_maxlen, ),
                              name="first_comment_sequence")
        long_desc_emb = Embedding(self.long_desc_vocab_size,
                                  self.long_desc_emb_sz)(long_desc_inp)
        long_desc_emb = SpatialDropout1D(
            self.long_desc_emb_dropout_rate)(long_desc_emb)
        long_desc_encoded = Bidirectional(
            GRU(
                self.long_desc_encoded_gru_units,
                dropout=self.long_desc_encoded_dropout,
                recurrent_dropout=self.long_desc_encoded_recurrent_dropout,
                return_sequences=True,
            ))(long_desc_emb)
        long_desc_encoded = GlobalMaxPooling1D()(long_desc_encoded)

        rep_platform_inp = Input(shape=(1, ), name="platform")
        rep_platform_emb = Embedding(
            input_dim=self.rep_platform_emb_input_dim,
            output_dim=self.rep_platform_emb_output_dim,
            input_length=1,
        )(rep_platform_inp)
        rep_platform_emb = SpatialDropout1D(
            self.rep_platform_emb_spatial_dropout_rate)(rep_platform_emb)
        rep_platform_emb = Flatten()(rep_platform_emb)
        rep_platform_emb = Dropout(
            self.rep_platform_emb_dropout_rate)(rep_platform_emb)

        op_sys_inp = Input(shape=(1, ), name="op_sys")
        op_sys_emb = Embedding(
            input_dim=self.op_sys_emb_input_dim,
            output_dim=self.op_sys_emb_output_dim,
            input_length=1,
        )(op_sys_inp)
        op_sys_emb = SpatialDropout1D(
            self.op_sys_emb_spatial_dropout_rate)(op_sys_emb)
        op_sys_emb = Flatten()(op_sys_emb)
        op_sys_emb = Dropout(self.op_sys_emb_dropout_rate)(op_sys_emb)

        reporter_inp = Input(shape=(1, ), name="bug_reporter")
        reporter_emb = Embedding(
            input_dim=self.reporter_emb_input_dim,
            output_dim=self.reporter_emb_output_dim,
            input_length=1,
        )(reporter_inp)
        reporter_emb = SpatialDropout1D(
            self.reporter_emb_spatial_dropout_rate)(reporter_emb)
        reporter_emb = Flatten()(reporter_emb)
        reporter_emb = Dropout(self.reporter_emb_dropout_rate)(reporter_emb)

        tfidf_word_inp = Input(shape=(X["title_word_tfidf"].shape[1], ),
                               name="title_word_tfidf")
        tfidf_word = Dense(self.tfidf_word_dense_units,
                           activation="relu")(tfidf_word_inp)
        tfidf_word = Dropout(self.tfidf_word_dropout_rate)(tfidf_word)

        tfidf_char_inp = Input(shape=(X["title_char_tfidf"].shape[1], ),
                               name="title_char_tfidf")
        tfidf_char = Dense(self.tfidf_char_inp_dense_unit,
                           activation="relu")(tfidf_char_inp)
        tfidf_char = Dropout(self.tfidf_char_inp_dropout_rate)(tfidf_char)

        x = layers.concatenate(
            [
                short_desc_encoded,
                long_desc_encoded,
                rep_platform_emb,
                op_sys_emb,
                reporter_emb,
                tfidf_word,
                tfidf_char,
            ],
            axis=-1,
        )

        x = Dense(self.x_dense_unit, activation="relu")(x)
        x = Dropout(self.x_dropout_rate)(x)
        x = Dense(y.shape[1], activation="softmax")(x)

        model = KerasModel(
            [
                short_desc_inp,
                long_desc_inp,
                rep_platform_inp,
                op_sys_inp,
                reporter_inp,
                tfidf_word_inp,
                tfidf_char_inp,
            ],
            x,
        )
        model.compile(optimizer="adam",
                      loss=["categorical_crossentropy"],
                      metrics=["acc"])

        return model
Code Example #22
File: zoo.py Project: cclauss/sight
    def load_architecture(self):
        """
		Returns tf.keras.models.Model instance
		"""
        inp_image = Input(shape=[None, None, 3])

        x = ConvBlock.get_conv_block(inp_image, [{
            'filter': 32,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 0
        }, {
            'filter': 64,
            'kernel': 3,
            'stride': 2,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 1
        }, {
            'filter': 32,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 2
        }, {
            'filter': 64,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 3
        }])

        x = ConvBlock.get_conv_block(x, [{
            'filter': 128,
            'kernel': 3,
            'stride': 2,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 5
        }, {
            'filter': 64,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 6
        }, {
            'filter': 128,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 7
        }])

        x = ConvBlock.get_conv_block(x, [{
            'filter': 64,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 9
        }, {
            'filter': 128,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 10
        }])

        x = ConvBlock.get_conv_block(x, [{
            'filter': 256,
            'kernel': 3,
            'stride': 2,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 12
        }, {
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 13
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 14
        }])

        for i in range(7):
            x = ConvBlock.get_conv_block(x, [{
                'filter': 128,
                'kernel': 1,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 16 + i * 3
            }, {
                'filter': 256,
                'kernel': 3,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 17 + i * 3
            }])

        skip_36 = x

        x = ConvBlock.get_conv_block(x, [{
            'filter': 512,
            'kernel': 3,
            'stride': 2,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 37
        }, {
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 38
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 39
        }])

        for i in range(7):
            x = ConvBlock.get_conv_block(x, [{
                'filter': 256,
                'kernel': 1,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 41 + i * 3
            }, {
                'filter': 512,
                'kernel': 3,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 42 + i * 3
            }])

        skip_61 = x

        x = ConvBlock.get_conv_block(x, [{
            'filter': 1024,
            'kernel': 3,
            'stride': 2,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 62
        }, {
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 63
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 64
        }])

        for i in range(3):
            x = ConvBlock.get_conv_block(x, [{
                'filter': 512,
                'kernel': 1,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 66 + i * 3
            }, {
                'filter': 1024,
                'kernel': 3,
                'stride': 1,
                'bnorm': True,
                'leaky': True,
                'layer_idx': 67 + i * 3
            }])

        x = ConvBlock.get_conv_block(x, [{
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 75
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 76
        }, {
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 77
        }, {
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 78
        }, {
            'filter': 512,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 79
        }],
                                     skip=False)

        yolo_82 = ConvBlock.get_conv_block(x, [{
            'filter': 1024,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 80
        }, {
            'filter': 255,
            'kernel': 1,
            'stride': 1,
            'bnorm': False,
            'leaky': False,
            'layer_idx': 81
        }],
                                           skip=False)

        x = ConvBlock.get_conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 84
        }],
                                     skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_61])

        x = ConvBlock.get_conv_block(x, [{
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 87
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 88
        }, {
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 89
        }, {
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 90
        }, {
            'filter': 256,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 91
        }],
                                     skip=False)

        yolo_94 = ConvBlock.get_conv_block(x, [{
            'filter': 512,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 92
        }, {
            'filter': 255,
            'kernel': 1,
            'stride': 1,
            'bnorm': False,
            'leaky': False,
            'layer_idx': 93
        }],
                                           skip=False)

        x = ConvBlock.get_conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 96
        }],
                                     skip=False)
        x = UpSampling2D(2)(x)
        x = concatenate([x, skip_36])

        yolo_106 = ConvBlock.get_conv_block(x, [{
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 99
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 100
        }, {
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 101
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 102
        }, {
            'filter': 128,
            'kernel': 1,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 103
        }, {
            'filter': 256,
            'kernel': 3,
            'stride': 1,
            'bnorm': True,
            'leaky': True,
            'layer_idx': 104
        }, {
            'filter': 255,
            'kernel': 1,
            'stride': 1,
            'bnorm': False,
            'leaky': False,
            'layer_idx': 105
        }],
                                            skip=False)

        model = Model(inp_image, [yolo_82, yolo_94, yolo_106])
        return model
Code Example #23
# Model 2
input2 = Input(shape=(3, ))
dense2 = Dense(10, activation='relu')(input2)
dense2 = Dense(5, activation='relu')(dense2)
dense2 = Dense(5, activation='relu')(dense2)
dense2 = Dense(5, activation='relu')(dense2)
# output2 = Dense(3)(dense2)

# Merge the models / concatenate
from tensorflow.keras.layers import concatenate, Concatenate
# from keras.layers.merge import concatenate, Concatenate
# from keras.layers import concatenate, Concatenate

# "merge" means to combine
merge1 = concatenate([dense1, dense2])  # pass the last Dense layer of each model
middle1 = Dense(30)(merge1)
middle1 = Dense(10)(middle1)
middle1 = Dense(10)(middle1)

# Model branch 1
output1 = Dense(30)(middle1)
output1 = Dense(7)(output1)
output1 = Dense(3)(output1)

# Model branch 2
output2 = Dense(15)(middle1)
output2 = Dense(7)(output2)
output2 = Dense(7)(output2)
output2 = Dense(3)(output2)
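
The excerpt starts at model 2, so the block defining input1/dense1 (model 1) and the final Model call are omitted. A minimal completion under that assumption:

# Hypothetical completion: input1 and dense1 come from the omitted "model 1" block.
from tensorflow.keras.models import Model

model = Model(inputs=[input1, input2], outputs=[output1, output2])
model.compile(loss='mse', optimizer='adam', metrics=['mae'])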
Code Example #24
pool1 = GlobalMaxPool1D()(conv1)

conv2 = Conv1D(filters=128,
               kernel_size=4,
               padding='valid',
               activation=tf.nn.relu)(dropout_emb)
pool2 = GlobalMaxPool1D()(conv2)

conv3 = Conv1D(filters=128,
               kernel_size=5,
               padding='valid',
               activation=tf.nn.relu)(dropout_emb)
pool3 = GlobalMaxPool1D()(conv3)

# concatenate the 3-, 4-, and 5-gram feature maps
concat = concatenate([pool1, pool2, pool3])

hidden = Dense(128, activation=tf.nn.relu)(concat)
dropout_hidden = Dropout(rate=dropout_prob)(hidden)

logits = Dense(3, name='logits')(dropout_hidden)  # raw class scores

predictions = Dense(3, activation=tf.nn.softmax)(
    logits)  # turn the logits into softmax probabilities

# 모델 생성
model = Model(inputs=input_layer, outputs=predictions)

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
Code Example #25
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K

from tensorflow.keras.layers import Input, Dense, concatenate
from tensorflow.keras.models import Model

input_user = Input(shape=(3, ))
input_ad = Input(shape=(3, ))

merged = concatenate([input_user, input_ad])
output_1 = Dense(64, activation='relu')(merged)
output_2 = Dense(64, activation='relu')(output_1)
predictions = Dense(1)(output_2)

model = Model(inputs=[input_user, input_ad], outputs=predictions)
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.summary()

SAMPLES = 1000
user_data = np.random.rand(SAMPLES, 3)
ad_data = np.random.rand(SAMPLES, 3)
labels = np.random.rand(SAMPLES, 1)
print(user_data[:10])
print(ad_data[:10])
print(labels[:10])

model.fit(
    [user_data, ad_data],
Code Example #26
File: DL_train.py Project: minjung-s/Shopping_GG
def DL_model(X_num, X_emb, X_time):
    def create_mlp(dim):
        # define our MLP network
        model = Sequential()
        model.add(Dense(64, input_dim=dim, activation='relu'))
        model.add(Dense(64, activation='relu'))  # input_dim only applies to the first layer
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(32, activation='relu'))
        model.add(Dense(32, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(16, activation='relu'))
        model.add(Dense(16, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(8, activation='relu'))
        model.add(Dense(8, activation='relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.3))
        model.add(Dense(4, activation='relu'))
        model.add(Dense(4, activation='relu'))
        return model

    def create_1Dcnn(dim):
        inputShape = (dim, 1)

        Inputs = Input(shape=inputShape)

        conv1 = Conv1D(filters=16,
                       kernel_size=3,
                       padding='valid',
                       activation='linear',
                       kernel_initializer='he_normal')(Inputs)
        pool1 = GlobalMaxPooling1D()(conv1)

        conv2 = Conv1D(filters=16,
                       kernel_size=4,
                       padding='valid',
                       activation='linear',
                       kernel_initializer='he_normal')(Inputs)
        pool2 = GlobalMaxPooling1D()(conv2)

        conv3 = Conv1D(filters=16,
                       kernel_size=5,
                       padding='valid',
                       activation='linear',
                       kernel_initializer='he_normal')(Inputs)
        pool3 = GlobalMaxPooling1D()(conv3)

        concat = concatenate([pool1, pool2, pool3])
        #concat = tf.expand_dims(concat,-1)

        #results = LSTM(64)(concat)
        results = Dense(10,
                        activation='linear',
                        kernel_initializer='he_normal')(concat)
        model = Model(Inputs, results)

        return model

    def create_lstm(dim):
        inputShape = (dim, 1)

        inputs = Input(shape=inputShape)

        x = Bidirectional(
            LSTM(20, return_sequences=True,
                 kernel_initializer='he_normal'))(inputs)
        x = Dropout(0.2)(x)
        x = Bidirectional(LSTM(10, kernel_initializer='he_normal'))(x)
        x = Dropout(0.2)(x)
        x = Dense(10, activation='relu', kernel_initializer='he_normal')(x)
        model = Model(inputs, x)

        return model

    mlp = create_mlp(X_num.shape[1])
    cnn = create_1Dcnn(X_emb.shape[1])
    lstm = create_lstm(X_time.shape[1])

    combinedInput = concatenate([mlp.output, cnn.output, lstm.output])

    x = Dense(32, activation="selu")(combinedInput)
    x = Dense(16, activation="selu")(x)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(8, activation="selu")(x)
    x = Dense(1, activation="selu")(x)

    model = Model(inputs=[mlp.input, cnn.input, lstm.input], outputs=x)

    return model
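
DL_model returns an uncompiled three-branch model; the CNN and LSTM branches were built on (dim, 1) inputs, so their arrays need a trailing channel axis. A usage sketch (the compile settings and y_train are assumptions, not from the source):

import numpy as np

model = DL_model(X_num, X_emb, X_time)
model.compile(optimizer='adam', loss='mse')
model.fit([X_num,
           np.expand_dims(X_emb, -1),    # (samples, dim) -> (samples, dim, 1)
           np.expand_dims(X_time, -1)],
          y_train, epochs=10, batch_size=64)  # y_train is hypothetical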
Code Example #27
def get_unet_batch_lstm(base_dense,
                        img_w,
                        img_h,
                        img_ch,
                        dropout=False,
                        dr=0.2):
    input_size = (img_w, img_h, img_ch)
    input_layer = Input(shape=input_size, name='input_layer')
    img_size = img_w  # assumption: square inputs (img_w == img_h), used in the reshapes below

    if dropout:  # note: only the dropout branch builds the decoder
        conv1 = conv_block(input_layer, base_dense, BatchNorm=True)
        pool1 = MaxPooling2D((2, 2))(conv1)
        pool1 = Dropout(dr)(pool1)

        conv2 = conv_block(pool1, base_dense * 2, BatchNorm=True)
        pool2 = MaxPooling2D((2, 2))(conv2)
        pool2 = Dropout(dr)(pool2)

        conv3 = conv_block(pool2, base_dense * 4, BatchNorm=True)
        pool3 = MaxPooling2D((2, 2))(conv3)
        pool3 = Dropout(dr)(pool3)

        conv4 = conv_block(pool3, base_dense * 8, BatchNorm=True)
        pool4 = MaxPooling2D((2, 2))(conv4)
        pool4 = Dropout(dr)(pool4)

        #middle
        convm = conv_block(pool4, base_dense * 16, BatchNorm=True)

        # up-sampling:
        deconv1 = Conv2DTranspose(base_dense * 8, (3, 3),
                                  strides=(2, 2),
                                  padding='same')(convm)
        # reshaping:
        x1 = Reshape(target_shape=(1, np.int32(img_size / 8),
                                   np.int32(img_size / 8),
                                   base_dense * 8))(conv4)
        # LSTM:
        x2 = Reshape(target_shape=(1, np.int32(img_size / 8),
                                   np.int32(img_size / 8),
                                   base_dense * 8))(deconv1)
        # concatenation:
        uconv1 = concatenate([x1, x2], axis=1)
        uconv1 = Dropout(dr)(uconv1)
        uconv1 = ConvLSTM2D(base_dense * 4, (3, 3),
                            padding='same',
                            return_sequences=False,
                            go_backwards=True)(uconv1)
        # conv_block implements the usual convolutional block with two convolutional layers:
        uconv1 = conv_block(uconv1, base_dense=base_dense * 8, BatchNorm=True)

        # up-sampling:
        deconv2 = Conv2DTranspose(base_dense * 4, (3, 3),
                                  strides=(2, 2),
                                  padding='same')(uconv1)
        # reshaping:
        x3 = Reshape(target_shape=(1, np.int32(img_size / 4),
                                   np.int32(img_size / 4),
                                   base_dense * 4))(conv3)
        # LSTM:
        x4 = Reshape(target_shape=(1, np.int32(img_size / 4),
                                   np.int32(img_size / 4),
                                   base_dense * 4))(deconv2)
        # concatenation:
        uconv2 = concatenate([x3, x4], axis=1)
        uconv2 = Dropout(dr)(uconv2)
        uconv2 = ConvLSTM2D(base_dense * 4, (3, 3),
                            padding='same',
                            return_sequences=False,
                            go_backwards=True)(uconv2)
        # conv_block implements the usual convolutional block with two convolutional layers:
        uconv2 = conv_block(uconv2, base_dense=base_dense * 2, BatchNorm=True)

        # up-sampling:
        deconv3 = Conv2DTranspose(base_dense * 2, (3, 3),
                                  strides=(2, 2),
                                  padding='same')(uconv2)
        # reshaping:
        x5 = Reshape(target_shape=(1, np.int32(img_size / 2),
                                   np.int32(img_size / 2),
                                   base_dense * 2))(conv2)
        # LSTM:
        x6 = Reshape(target_shape=(1, np.int32(img_size / 2),
                                   np.int32(img_size / 2),
                                   base_dense * 2))(deconv3)
        # concatenation:
        uconv3 = concatenate([x5, x6], axis=1)
        uconv3 = Dropout(dr)(uconv3)
        uconv3 = ConvLSTM2D(base_dense * 2, (3, 3),
                            padding='same',
                            return_sequences=False,
                            go_backwards=True)(uconv3)
        # conv_block implements the usual convolutional block with two convolutional layers:
        uconv3 = conv_block(uconv3, base_dense=base_dense * 2, BatchNorm=True)

        # up-sampling:
        deconv4 = Conv2DTranspose(base_dense, (3, 3),
                                  strides=(2, 2),
                                  padding='same')(uconv3)
        # reshaping:
        x7 = Reshape(target_shape=(1, np.int32(img_size), np.int32(img_size),
                                   base_dense))(conv1)
        # LSTM:
        x8 = Reshape(target_shape=(1, np.int32(img_size), np.int32(img_size),
                                   base_dense))(deconv4)
        # concatenation:
        uconv4 = concatenate([x7, x8], axis=1)
        uconv4 = Dropout(dr)(uconv4)
        uconv4 = ConvLSTM2D(base_dense * 2, (3, 3),
                            padding='same',
                            return_sequences=False,
                            go_backwards=True)(uconv4)
        # conv_block implements the usual convolutional block with two convolutional layers:
        uconv4 = conv_block(uconv4, base_dense=base_dense, BatchNorm=True)

    output_layer = Conv2D(1, (1, 1),
                          padding='same',
                          activation='sigmoid',
                          name='output_layer')(uconv4)

    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()

    return model
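
As written, only the dropout branch builds the decoder, so the function must be called with dropout=True or uconv4 is undefined. A call sketch with assumed sizes:

# Hypothetical sizes; img_w == img_h is required by the img_size reshapes.
model = get_unet_batch_lstm(base_dense=16, img_w=128, img_h=128, img_ch=1,
                            dropout=True, dr=0.2)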
Code Example #28
    base_model_low_out = base_model.get_layer(low_name).output
    base_model_low = models.Model(base_model.input, base_model_low_out)
    # Freeze all layers
    base_model_low.trainable = False
    base_low_out = base_model_low(input_v)
    # Testing to see if it matches lower level features
    #low_level_feature_model = models.Model(inputs=base_model.input,
    #	outputs=base_model.get_layer('block3_pool').output)
    #low_level_output = low_level_feature_model.predict(X_v_train[1,].reshape(1,230,119,3))

    # Flatten base model
    flatten = layers.Flatten()(base_low_out)

    # Add in situational data
    concat = layers.concatenate([flatten, input_s])

    # Pass through dense layer
    dense = layers.Dense(NUM_UNITS,
                         activation="relu",
                         kernel_initializer="he_normal")(concat)

    # Output layer for binary classification
    out = layers.Dense(1, activation="sigmoid")(dense)

    # Define model
    model = models.Model(inputs=[input_v, input_s], outputs=out)

    # Define optimizer and metrics
    model.compile(optimizer=optimizers.Adam(learning_rate=LEARNING_RATE),
                  metrics=["accuracy"],
Code Example #29
File: pbt_memnn_example.py Project: wuisawesome/ray
    def build_model(self):
        """Helper method for creating the model"""
        vocab = set()
        for story, q, answer in self.train_stories + self.test_stories:
            vocab |= set(story + q + [answer])
        vocab = sorted(vocab)

        # Reserve 0 for masking via pad_sequences
        vocab_size = len(vocab) + 1
        story_maxlen = max(len(x) for x, _, _ in self.train_stories + self.test_stories)
        query_maxlen = max(len(x) for _, x, _ in self.train_stories + self.test_stories)

        word_idx = {c: i + 1 for i, c in enumerate(vocab)}
        self.inputs_train, self.queries_train, self.answers_train = vectorize_stories(
            word_idx, story_maxlen, query_maxlen, self.train_stories
        )
        self.inputs_test, self.queries_test, self.answers_test = vectorize_stories(
            word_idx, story_maxlen, query_maxlen, self.test_stories
        )

        # placeholders
        input_sequence = Input((story_maxlen,))
        question = Input((query_maxlen,))

        # encoders
        # embed the input sequence into a sequence of vectors
        input_encoder_m = Sequential()
        input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
        input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, embedding_dim)

        # embed the input into a sequence of vectors of size query_maxlen
        input_encoder_c = Sequential()
        input_encoder_c.add(Embedding(input_dim=vocab_size, output_dim=query_maxlen))
        input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, story_maxlen, query_maxlen)

        # embed the question into a sequence of vectors
        question_encoder = Sequential()
        question_encoder.add(
            Embedding(input_dim=vocab_size, output_dim=64, input_length=query_maxlen)
        )
        question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
        # output: (samples, query_maxlen, embedding_dim)

        # encode input sequence and questions (which are indices)
        # to sequences of dense vectors
        input_encoded_m = input_encoder_m(input_sequence)
        input_encoded_c = input_encoder_c(input_sequence)
        question_encoded = question_encoder(question)

        # compute a "match" between the first input vector sequence
        # and the question vector sequence
        # shape: `(samples, story_maxlen, query_maxlen)`
        match = dot([input_encoded_m, question_encoded], axes=(2, 2))
        match = Activation("softmax")(match)

        # add the match matrix with the second input vector sequence
        response = add(
            [match, input_encoded_c]
        )  # (samples, story_maxlen, query_maxlen)
        response = Permute((2, 1))(response)  # (samples, query_maxlen, story_maxlen)

        # concatenate the match matrix with the question vector sequence
        answer = concatenate([response, question_encoded])

        # the original paper uses a matrix multiplication.
        # we choose to use a RNN instead.
        answer = LSTM(32)(answer)  # (samples, 32)

        # one regularization layer -- more would probably be needed.
        answer = Dropout(self.config.get("dropout", 0.3))(answer)
        answer = Dense(vocab_size)(answer)  # (samples, vocab_size)
        # we output a probability distribution over the vocabulary
        answer = Activation("softmax")(answer)

        # build the final model
        model = Model([input_sequence, question], answer)
        return model
Code Example #30
    m = [757.8467177554851, 555.4583686176969, 2.1526621404323745]
    c = [4.5697448219753305, 68.02014004606737, 0.9322994212410762]
    for i in range(0, 3):
        X_pos[:, i] = X_pos[:, i] - m[i]
        X_pos[:, i] = X_pos[:, i] / c[i]
    # Normalizing the lidar array would cast everything to float and make it too
    # large in memory, so normalize the input tensor instead.
    lidar = np.load('./lidar_010.npz')
    X_lidar = lidar['input']
    return X_pos, X_lidar


X_p, X_l = loadData()
lidar_model = models.lidarNet(X_l[0].shape, 2, 3)

pos_model = models.posNet(3)
combined_model = concatenate([pos_model.output, lidar_model.output])
reg_val = 0
layer = Dense(600,
              activation='relu',
              kernel_regularizer=l2(reg_val),
              bias_regularizer=l2(reg_val))(combined_model)
layer = Dense(600,
              activation='relu',
              kernel_regularizer=l2(reg_val),
              bias_regularizer=l2(reg_val))(layer)
layer = Dense(500,
              activation='relu',
              kernel_regularizer=l2(reg_val),
              bias_regularizer=l2(reg_val))(layer)
out = Dense(256, activation='softmax')(layer)
model = Model(inputs=[pos_model.input, lidar_model.input], outputs=out)
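
The snippet stops before training; a plausible compile step for the 256-way softmax head (the loss choice is an assumption, not from the source):

# Hypothetical setup; assumes one-hot targets for the 256-way softmax output.
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])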
Code Example #31
def make_stack_net_v4(inp_shape, batch_size, params):
    """
    Autoencoder combined with VCNN
    """
    inputs = {
        'conditioned_occ': tf.keras.Input(batch_size=batch_size,
                                          shape=inp_shape),
        'known_occ': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
        'known_free': tf.keras.Input(batch_size=batch_size, shape=inp_shape),
    }

    # Autoencoder
    x = tfl.concatenate([inputs['known_occ'], inputs['known_free']], axis=4)

    for n_filter in [64, 128, 256, 512]:
        x = tfl.Conv3D(n_filter, (
            2,
            2,
            2,
        ), use_bias=True, padding="same")(x)
        x = tfl.Activation(tf.nn.relu)(x)
        x = tfl.MaxPool3D((2, 2, 2))(x)

    x = tfl.Flatten()(x)
    x = tfl.Dense(params['num_latent_layers'], activation='relu')(x)
    x = tfl.Dense(32768, activation='relu')(x)
    x = tfl.Reshape((4, 4, 4, 512))(x)
    auto_encoder_features = x

    for n_filter in [256, 128, 64, 12]:
        x = tfl.Conv3DTranspose(n_filter, (
            2,
            2,
            2,
        ),
                                use_bias=True,
                                strides=2)(x)
        x = tfl.Activation(tf.nn.relu)(x)

    x = tfl.Conv3D(1, (1, 1, 1), use_bias=True)(x)
    ae_output_before_activation = x
    autoencoder_output = tfl.Activation(tf.nn.sigmoid)(x)

    # VCNN
    filter_size = [2, 2, 2]
    # n_filters = [64, 128, 256, 512]

    x = inputs['conditioned_occ']
    conv_args_strided = {
        'use_bias': True,
        'nln': tf.nn.elu,
        'strides': [1, 2, 2, 2, 1]
    }

    def bs_strided(x, n_filters):
        return nn.BackShiftConv3D(n_filters,
                                  filter_size=filter_size,
                                  **conv_args_strided)(x)

    def bds_strided(x, n_filters):
        return nn.BackDownShiftConv3D(n_filters,
                                      filter_size=filter_size,
                                      **conv_args_strided)(x)

    def bdrs_strided(x, n_filters):
        return nn.BackDownRightShiftConv3D(n_filters,
                                           filter_size=filter_size,
                                           **conv_args_strided)(x)

    conv_args = {
        'use_bias': True,
        'nln': tf.nn.elu,
        'strides': [1, 1, 1, 1, 1]
    }

    def bs(x, n_filters):
        return nn.BackShiftConv3D(n_filters,
                                  filter_size=filter_size,
                                  **conv_args)(x)

    def bds(x, n_filters):
        return nn.BackDownShiftConv3D(n_filters,
                                      filter_size=filter_size,
                                      **conv_args)(x)

    def bdrs(x, n_filters):
        return nn.BackDownRightShiftConv3D(n_filters,
                                           filter_size=filter_size,
                                           **conv_args)(x)

    flf = 4  # num_first_layer_filters

    # Three shifted streams: Front (f), Upper-Front (uf), and
    # Left-Upper-Front (luf). Each stream's receptive field is shifted so a
    # voxel only sees "earlier" voxels (behind, above, and to the left),
    # in the spirit of PixelCNN's shifted convolutions.
    f_1 = nn.BackShift()(bs(x, flf))
    uf_1 = nn.BackShift()(bs(x, flf)) + \
           nn.DownShift()(bds(x, flf))
    luf_1 = nn.BackShift()(bs(x, flf)) + \
            nn.DownShift()(bds(x, flf)) + \
            nn.RightShift()(bdrs(x, flf))

    for i in range(2):
        f_1 = bs(f_1, flf)
        uf_1 = bds(uf_1, flf) + f_1
        luf_1 = bdrs(luf_1, flf) + uf_1

    f_list = [f_1]
    uf_list = [uf_1]
    luf_list = [luf_1]

    for fs in [64, 128, 256, 512]:
        f_list.append(bs_strided(f_list[-1], fs))
        uf_list.append(bds_strided(uf_list[-1], fs) + f_list[-1])
        luf_list.append(bdrs_strided(luf_list[-1], fs) + uf_list[-1])

    f = f_list.pop()
    uf = uf_list.pop()
    luf = tf.concat([luf_list.pop(), auto_encoder_features], axis=4)

    for fs in [256, 128, 64, 4]:
        f = tf.concat([tfl.Conv3DTranspose(fs, [2, 2, 2], strides=[2, 2, 2])(f),
                       f_list.pop()], axis=4)
        f = tfl.Activation(tf.nn.elu)(f)
        uf = tf.concat([tfl.Conv3DTranspose(fs, [2, 2, 2], strides=[2, 2, 2])(uf),
                        uf_list.pop(), f], axis=4)
        uf = tfl.Activation(tf.nn.elu)(uf)
        luf = tf.concat([tfl.Conv3DTranspose(fs, [2, 2, 2], strides=[2, 2, 2])(luf),
                         luf_list.pop(), uf], axis=4)
        luf = tfl.Activation(tf.nn.elu)(luf)

    x = luf

    x = nn.Conv3D(n_filters=1, filter_size=[1, 1, 1], use_bias=True)(x)

    if params['final_activation'] == 'sigmoid':
        x = tf.nn.sigmoid(x)
    elif params['final_activation'] == 'elu':
        x = tf.nn.elu(x)
    elif params['final_activation'] is None:
        pass
    else:
        raise ValueError("Unknown param value for [final_activation]: {}".format(
            params['final_activation']))

    output = {
        "predicted_occ": x,
        "predicted_free": 1 - x,
        "aux_occ": autoencoder_output
    }
    return tf.keras.Model(inputs=inputs, outputs=output)
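
A hypothetical instantiation sketch: the Dense(32768) -> Reshape((4, 4, 4, 512))
bottleneck implies a 64^3 single-channel input grid (four stride-2
downsamplings), so a call like the following should fit; the params values
here are assumptions, not taken from the original source.

params = {'num_latent_layers': 200, 'final_activation': 'sigmoid'}
model = make_stack_net_v4(inp_shape=(64, 64, 64, 1), batch_size=16,
                          params=params)
model.summary()
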
コード例 #32
def U_Net2(features, labels, mode):
    # Input layer: reshape the magnitude spectrogram to NHWC.
    input_layer = tf.reshape(features['mag'], [-1, 512, 128, 1])
    
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    # Encoder: six strided convolutions, each followed by batch normalization
    # and leaky ReLU. Passing training=is_training lets batch norm update its
    # moving statistics during training.
    conv1 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=input_layer, filters=16, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))
    conv2 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=conv1, filters=32, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))
    conv3 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=conv2, filters=64, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))
    conv4 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=conv3, filters=128, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))
    conv5 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=conv4, filters=256, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))
    conv6 = tf.nn.leaky_relu(tf.layers.batch_normalization(
        tf.layers.conv2d(inputs=conv5, filters=512, kernel_size=[5, 5],
                         strides=[2, 2], padding="same", activation=None),
        training=is_training))

    # Decoder: transposed convolutions with skip connections back to the
    # encoder; dropout after the first three layers is active only during
    # training. `concatenate` is assumed to be tf.keras.layers.concatenate
    # (no import is shown in the original snippet).
    deconv1 = tf.nn.relu(tf.layers.batch_normalization(
        tf.layers.conv2d_transpose(inputs=conv6, filters=256,
                                   kernel_size=[5, 5], strides=[2, 2],
                                   padding="same", activation=None),
        training=is_training))
    dropout1 = tf.layers.dropout(inputs=deconv1, rate=0.5, training=is_training)

    deconv2 = tf.nn.relu(tf.layers.batch_normalization(
        tf.layers.conv2d_transpose(inputs=concatenate([dropout1, conv5], 3),
                                   filters=128, kernel_size=[5, 5],
                                   strides=[2, 2], padding="same",
                                   activation=None),
        training=is_training))
    dropout2 = tf.layers.dropout(inputs=deconv2, rate=0.5, training=is_training)

    deconv3 = tf.nn.relu(tf.layers.batch_normalization(
        tf.layers.conv2d_transpose(inputs=concatenate([dropout2, conv4], 3),
                                   filters=64, kernel_size=[5, 5],
                                   strides=[2, 2], padding="same",
                                   activation=None),
        training=is_training))
    dropout3 = tf.layers.dropout(inputs=deconv3, rate=0.5, training=is_training)

    deconv4 = tf.nn.relu(tf.layers.batch_normalization(
        tf.layers.conv2d_transpose(inputs=concatenate([dropout3, conv3], 3),
                                   filters=32, kernel_size=[5, 5],
                                   strides=[2, 2], padding="same",
                                   activation=None),
        training=is_training))

    deconv5 = tf.nn.relu(tf.layers.batch_normalization(
        tf.layers.conv2d_transpose(inputs=concatenate([deconv4, conv2], 3),
                                   filters=16, kernel_size=[5, 5],
                                   strides=[2, 2], padding="same",
                                   activation=None),
        training=is_training))

    # Final transposed convolution maps back to a single-channel spectrogram.
    deconv6 = tf.layers.conv2d_transpose(inputs=concatenate([deconv5, conv1], 3),
                                         filters=1, kernel_size=[5, 5],
                                         strides=[2, 2], padding="same",
                                         activation=tf.nn.relu)
    
    predictions = {'outputs': deconv6}
    
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    
    loss = tf.losses.absolute_difference(labels, deconv6)
    
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(1e-4)
        # Batch norm's moving-statistic updates live in UPDATE_OPS, so the
        # train op must depend on them.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(
                loss=loss,
                global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss)
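
A hypothetical usage sketch under the TF1 Estimator API; the `model_dir`,
`train_input_fn`, and step count below are assumptions, not part of the
original example.

estimator = tf.estimator.Estimator(model_fn=U_Net2, model_dir='/tmp/u_net2')
# train_input_fn must yield ({'mag': <batch, 512, 128>}, labels) batches.
# estimator.train(input_fn=train_input_fn, steps=1000)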