# Example 1
def ResBlock(input_shape,
             sampling=None,
             trainable_sortcut=True,
             spectral_normalization=False,
             batch_normalization=True,
             bn_momentum=0.9,
             bn_epsilon=0.00002,
             channels=256,
             k_size=3,
             summary=False,
             plot=False,
             name=None):
    '''
    Build a pre-activation residual block as a keras Model.

    Parameters
    ----------
    input_shape : tuple
        Shape of the block input, excluding the batch dimension.
    sampling : str or None
        'up'   -> UpSampling2D after the first conv (and on the shortcut);
        'down' -> AveragePooling2D after the second conv (and on the shortcut);
        None   -> no resampling.
    trainable_sortcut : bool
        If True the shortcut branch is a trainable 1x1 convolution,
        otherwise the identity.  (Misspelled name kept for backward
        compatibility; it means "shortcut".)
    spectral_normalization : bool
        Use ConvSN2D (spectrally normalized) instead of Conv2D.
    batch_normalization : bool
        Insert BatchNormalization before each activation.
    bn_momentum, bn_epsilon : float
        BatchNormalization hyper-parameters.
    channels : int
        Number of filters of every convolution in the block.
    k_size : int
        Kernel size of the two main convolutions.
    summary : bool
        If True, print the block name and model summary.
    plot : bool
        If True, save a plot of the model to '<name>.png'.
    name : str or None
        Name of the returned Model.

    Returns
    -------
    keras Model mapping the block input to shortcut + residual.
    '''
    # Both layer classes exist at module level, so selecting once up front
    # removes the duplicated SN / non-SN call sites of the original.
    conv = ConvSN2D if spectral_normalization else Conv2D

    res_block_input = Input(shape=input_shape)

    # First BN -> ReLU -> Conv (pre-activation ordering).
    if batch_normalization:
        res_block_1 = BatchNormalization(momentum=bn_momentum,
                                         epsilon=bn_epsilon)(res_block_input)
    else:
        res_block_1 = res_block_input
    res_block_1 = Activation('relu')(res_block_1)
    res_block_1 = conv(channels,
                       k_size,
                       strides=1,
                       padding='same',
                       kernel_initializer='glorot_uniform')(res_block_1)

    # Upsampling happens between the two convolutions.
    if sampling == 'up':
        res_block_1 = UpSampling2D()(res_block_1)

    # Second BN -> ReLU -> Conv.
    if batch_normalization:
        res_block_2 = BatchNormalization(momentum=bn_momentum,
                                         epsilon=bn_epsilon)(res_block_1)
    else:
        res_block_2 = res_block_1
    res_block_2 = Activation('relu')(res_block_2)
    res_block_2 = conv(channels,
                       k_size,
                       strides=1,
                       padding='same',
                       kernel_initializer='glorot_uniform')(res_block_2)

    # Downsampling happens after the second convolution.
    if sampling == 'down':
        res_block_2 = AveragePooling2D()(res_block_2)

    # Shortcut branch: trainable 1x1 conv or identity, resampled to match
    # the residual branch's output resolution.
    if trainable_sortcut:
        short_cut = conv(channels,
                         1,
                         strides=1,
                         padding='same',
                         kernel_initializer='glorot_uniform')(res_block_input)
    else:
        short_cut = res_block_input

    if sampling == 'up':
        short_cut = UpSampling2D()(short_cut)
    elif sampling == 'down':
        short_cut = AveragePooling2D()(short_cut)
    # FIX: the original had `elif sampling == 'None'` here, comparing against
    # the *string* 'None'; `sampling is None` is the intended no-op and simply
    # falls through.

    res_block_add = Add()([short_cut, res_block_2])

    res_block = Model(res_block_input, res_block_add, name=name)

    if plot:
        plot_model(res_block, name + '.png', show_layer_names=False)
    if summary:
        print(name)
        res_block.summary()

    return res_block
# Example 2
# Continuation of a model built above this chunk: `de_model` (presumably a
# speech-enhancement / mask-estimation network — confirm against the unseen
# code that constructs it) gets a BiLSTM over time, two TimeDistributed
# dense stages, and a sigmoid output.
de_model.add(
    Bidirectional(LSTM(200, return_sequences=True), merge_mode='concat'))

de_model.add(TimeDistributed(Dense(300)))
de_model.add(LeakyReLU())
de_model.add(Dropout(0.05))

# 257-wide output per time step (matches the discriminator input's first
# axis below); sigmoid bounds each value to [0, 1].
de_model.add(TimeDistributed(Dense(257)))
de_model.add(Activation('sigmoid'))

#### Define the structure of Discriminator (surrogate loss approximator)  #####
print('Discriminator constructuring...')
# Input: (257, variable length, 2 channels); BN over the channel axis.
_input = Input(shape=(257, None, 2))
_inputBN = BatchNormalization(axis=-1)(_input)

# Four spectrally normalized conv blocks with growing filter counts and
# kernel sizes, each followed by LeakyReLU.
C1 = ConvSN2D(15, (5, 5), padding='valid',
              data_format='channels_last')(_inputBN)
C1 = LeakyReLU()(C1)

C2 = ConvSN2D(25, (7, 7), padding='valid', data_format='channels_last')(C1)
C2 = LeakyReLU()(C2)

C3 = ConvSN2D(40, (9, 9), padding='valid', data_format='channels_last')(C2)
C3 = LeakyReLU()(C3)

C4 = ConvSN2D(50, (11, 11), padding='valid', data_format='channels_last')(C3)
C4 = LeakyReLU()(C4)

# Global average pooling collapses the spatial axes so the variable-length
# time dimension disappears before the dense head.
Average_score = GlobalAveragePooling2D(name='Average_score')(
    C4)  #(batch_size, channels)

D1 = DenseSN(50)(Average_score)
# Example 3
def word_cnn(dataset, use_glove=False, sn=True):
    """Build and compile a word-level CNN text classifier.

    Parameters
    ----------
    dataset : str
        Key into the module-level `config` tables (max sequence length,
        number of classes, loss, final activation, embedding dims, vocab).
    use_glove : bool
        If True, initialize the embedding layer from pre-trained GloVe
        vectors (./glove.6B.<dims>d.txt) and freeze it.
    sn : bool
        If True, use spectrally normalized layers (ConvSN2D / DenseSN);
        otherwise plain Conv1D / Dense.

    Returns
    -------
    A compiled keras Sequential model.
    """
    filters = 250
    kernel_size = 3
    hidden_dims = 250

    max_len = config.word_max_len[dataset]
    num_classes = config.num_classes[dataset]
    loss = config.loss[dataset]
    activation = config.activation[dataset]
    embedding_dims = config.wordCNN_embedding_dims[dataset]
    num_words = config.num_words[dataset]

    print('Build word_cnn model...')
    model = Sequential()

    if use_glove:
        file_path = r'./glove.6B.{}d.txt'.format(str(embedding_dims))
        # FIX: removed a leftover `pdb.set_trace()` debugging breakpoint.
        get_embedding_index(file_path)
        get_embedding_matrix(dataset, num_words, embedding_dims)
        model.add(
            Embedding(  # Layer 0, Start
                input_dim=num_words + 1,  # dictionary size has to be input + 1
                output_dim=embedding_dims,  # Dimensions to generate
                weights=[embedding_matrix],  # Initialize word weights
                input_length=max_len,
                name="embedding_layer",
                trainable=False))
    else:
        model.add(Embedding(num_words, embedding_dims, input_length=max_len))
    model.add(Dropout(0.2))

    if sn:
        ##  with SN  ##
        # No 1-D spectrally normalized conv is available, so lift the
        # (max_len, embedding_dims) sequence to 4-D, convolve along the
        # time axis with a (1, kernel_size) kernel, then squeeze back.
        # Generalized from the original hard-coded 400/50 and 398/250
        # shapes, which were only valid for max_len=400, embedding_dims=50
        # (398 = 400 - kernel_size + 1 for a 'valid' convolution).
        model.add(Reshape((-1, max_len, embedding_dims)))
        model.add(
            ConvSN2D(filters, (1, kernel_size),
                     strides=1,
                     padding='valid',
                     activation='relu'))
        model.add(Reshape((max_len - kernel_size + 1, filters)))
        model.add(GlobalMaxPooling1D())
        model.add(DenseSN(hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))
        model.add(DenseSN(num_classes))
        model.add(Activation(activation))
    else:
        ##  without SN  ##
        model.add(
            Conv1D(filters,
                   kernel_size,
                   padding='valid',
                   activation='relu',
                   strides=1))
        model.add(GlobalMaxPooling1D())
        model.add(Dense(hidden_dims))
        model.add(Dropout(0.2))
        model.add(Activation('relu'))
        model.add(Dense(num_classes))
        model.add(Activation(activation))

    model.compile(loss=loss, optimizer='adam', metrics=['accuracy'])

    return model
# Example 4
def BuildDiscriminator(summary=True,
                       spectral_normalization=True,
                       batch_normalization=False,
                       bn_momentum=0.9,
                       bn_epsilon=0.00002,
                       resnet=True,
                       name='Discriminator',
                       plot=False):
    """Build the 32x32x3 discriminator, either as a ResNet or a plain CNN.

    With `resnet=True`: four 128-channel ResBlocks (two downsampling with
    trainable shortcuts, two plain without), ReLU, global sum pooling and
    a spectrally normalized 1-unit dense head.

    With `resnet=False`: seven conv layers (3x3 stride-1 alternating with
    4x4 stride-2) each followed by LeakyReLU(0.1), global sum pooling and
    a 1-unit dense head; spectral normalization of all layers is selected
    by `spectral_normalization`.
    """
    if resnet:
        model_input = Input(shape=(32, 32, 3))
        # (input_shape, sampling, trainable shortcut, block name)
        block_specs = [
            ((32, 32, 3), 'down', True, 'Discriminator_resblock_Down_1'),
            ((16, 16, 128), 'down', True, 'Discriminator_resblock_Down_2'),
            ((8, 8, 128), None, False, 'Discriminator_resblock_1'),
            ((8, 8, 128), None, False, 'Discriminator_resblock_2'),
        ]
        h = model_input
        for in_shape, sampling, shortcut, block_name in block_specs:
            h = ResBlock(input_shape=in_shape,
                         channels=128,
                         sampling=sampling,
                         batch_normalization=True,
                         spectral_normalization=spectral_normalization,
                         trainable_sortcut=shortcut,
                         name=block_name)(h)
        h = Activation('relu')(h)
        h = GlobalSumPooling2D()(h)
        model_output = DenseSN(1, kernel_initializer='glorot_uniform')(h)

        model = Model(model_input, model_output, name=name)
    else:
        conv_cls = ConvSN2D if spectral_normalization else Conv2D
        dense_cls = DenseSN if spectral_normalization else Dense
        # (filters, kernel_size, strides) for each conv layer.
        conv_specs = [(64, 3, 1), (64, 4, 2),
                      (128, 3, 1), (128, 4, 2),
                      (256, 3, 1), (256, 4, 2),
                      (512, 3, 1)]
        model = Sequential(name=name)
        for layer_idx, (n_filters, k, s) in enumerate(conv_specs):
            conv_kwargs = dict(kernel_size=k,
                               strides=s,
                               kernel_initializer='glorot_uniform',
                               padding='same')
            if layer_idx == 0:
                # Only the first layer declares the input shape.
                conv_kwargs['input_shape'] = (32, 32, 3)
            model.add(conv_cls(n_filters, **conv_kwargs))
            model.add(LeakyReLU(0.1))
        model.add(GlobalSumPooling2D())
        model.add(dense_cls(1, kernel_initializer='glorot_uniform'))

    if plot:
        plot_model(model, name + '.png', show_layer_names=True)

    if summary:
        print('Discriminator')
        print('Spectral Normalization: {}'.format(spectral_normalization))
        model.summary()
    return model
# Example 5
                      input_tensor=layers.Input(shape=(image_size[0],
                                                       image_size[1], 3)),
                      include_top=False)

#last convolution layer
base_out = base_model.output

# Static dims of the backbone's output feature map: dims[2] is the channel
# count; feat_dim presumably sizes the flattened per-ROI feature — confirm
# against the (unseen) pooling code that uses it.
dims = base_out.shape.as_list()[1:]
feat_dim = dims[2] * pool_size * pool_size
base_channels = dims[2]

x = base_out
x = squeeze_excite_block(x)  #Added new

#self-attention
# f and g are the reduced (channels // 8) projections, h the full-channel
# projection; SelfAttention combines them with the original map.
x_f = ConvSN2D(base_channels // 8, kernel_size=1, strides=1,
               padding='same')(x)  # [bs, h, w, c']
x_g = ConvSN2D(base_channels // 8, kernel_size=1, strides=1,
               padding='same')(x)  # [bs, h, w, c']
x_h = ConvSN2D(base_channels, kernel_size=1, strides=1, padding='same')(x)
x_final = SelfAttention(filters=base_channels)([x, x_f, x_g, x_h])

#x_final = base_out

# Resize the attended feature map to the ROI working resolution inside a
# Lambda layer so it stays part of the keras graph.
full_img = layers.Lambda(
    lambda x: K.tf.image.resize_images(
        x, size=(ROIS_resolution, ROIS_resolution)),
    name='Lambda_img_1'
)(
    x_final
)  #Use bilinear upsampling (default tensorflow image resize) to a reasonable size
"""Do the ROIs information and separate them out"""
# Example 6
def encoder_layers_dcgan_univ(image_size, image_channels, base_channels,
                              bn_allowed, wd, encoder_use_sn):
    """Return the list of DCGAN-style encoder layers (unconnected).

    Every layer is a 4x4 'same' convolution.  The first layer keeps the
    resolution; each subsequent ReLU layer halves the width and doubles
    the channel count until the precomputed number of downsampling steps
    is exhausted, after which the remaining layers are stride-1 with a
    linear activation and no batch norm.  A Flatten layer is appended.

    Parameters
    ----------
    image_size : sequence
        Input resolution; only image_size[0] is used.
    image_channels : int
        Unused here; kept for signature compatibility with sibling
        encoder builders.
    base_channels : int
        Filter count of the first convolution.
    bn_allowed : bool
        Whether ReLU layers may be preceded by BatchNormalization.
    wd : float
        L2 weight-decay coefficient for the conv kernels.
    encoder_use_sn : bool
        Use ConvSN2D instead of Conv2D.

    Returns
    -------
    list of keras layers.
    """
    # Number of times image_size[0] can be halved before it drops below 8
    # (or becomes odd); this bounds the stride-2 layers below.
    n_upsample = 0
    n = image_size[0]
    while n % 2 == 0 and n >= 8:
        n = n // 2
        n_upsample += 1
    # NOTE: the original also kept `start_width = n`, which was never used.

    kernel = 4
    upsamples = 0

    conv_cls = ConvSN2D if encoder_use_sn else Conv2D

    channels = base_channels
    width = image_size[0]
    layers = []
    idx = 0
    while width >= 8:
        if n_upsample <= upsamples:
            # All downsampling steps done: stride-1 linear tail layers.
            stride = 1
            activation = "linear"
            use_bn = False
        elif idx == 0:
            # First layer keeps the input resolution.
            stride = 1
            activation = "relu"
            use_bn = bn_allowed
        else:
            # Downsampling step: halve width, double channels.
            stride = 2
            width = width // 2
            channels = 2 * channels
            upsamples += 1
            activation = "relu"
            use_bn = bn_allowed

        layers.append(
            conv_cls(channels, (kernel, kernel),
                     strides=(stride, stride),
                     padding="same",
                     use_bias=False,
                     kernel_regularizer=l2(wd)))

        if use_bn:
            layers.append(BatchNormalization(axis=1))

        # FIX: the original had an if/else whose two branches appended the
        # exact same Activation layer; a single append is equivalent.
        layers.append(Activation(activation, name="encoder_{}".format(idx)))
        idx += 1
    layers.append(Flatten())
    return layers
# Example 7
def encoder_layers_baseline_mnist(image_size, image_channels, base_channels,
                                  bn_allowed, wd, seed, encoder_use_sn):
    """
    Following Nalisnick et al. (arxiv: 1810.09136), for MNIST, we use the
    encoder architecture described in Rosca et al. (arxiv: 1802.06847) in
    appendix K table 4: five 5x5 'same' convolutions (8/16/32/64/64
    filters, alternating stride 2 and 1), each followed by ReLU, then a
    Flatten layer.  `encoder_use_sn` selects spectrally normalized convs.
    """
    initializer = RandomNormal(mean=0.0, stddev=np.sqrt(0.02), seed=seed)
    conv_cls = ConvSN2D if encoder_use_sn else Conv2D

    # (filters, strides) of each 5x5 convolution, in order.
    conv_specs = [(8, (2, 2)),
                  (16, (1, 1)),
                  (32, (2, 2)),
                  (64, (1, 1)),
                  (64, (2, 2))]

    layers = []
    for n_filters, strides in conv_specs:
        layers.append(
            conv_cls(n_filters, (5, 5),
                     strides=strides,
                     padding='same',
                     kernel_initializer=initializer,
                     bias_initializer=initializer,
                     kernel_regularizer=l2(wd)))
        layers.append(Activation('relu'))

    layers.append(Flatten())
    return layers
# Example 8
    def _build_common_encoder_ganda(image, min_latent_res=8):
        """Build the shared conv feature extractor and apply it to `image`.

        A relatively standard conv net with LeakyReLU activations, as
        suggested in ACGAN: four spectrally normalized 3x3 convolutions
        (32/64/128/256 filters, alternating stride 2 and 1), then extra
        stride-2 / stride-1 pairs of 256-filter convs until the tracked
        feature resolution (starting at `resolution` / 4, where
        `resolution` comes from the enclosing scope) is <= 8, followed
        by Flatten.

        NOTE(review): `min_latent_res` is accepted but unused — the loop
        hard-codes the threshold 8; confirm whether it should use the
        parameter instead.

        Returns the flattened feature tensor for `image`.
        """
        # FIX: removed ~40 lines of commented-out dead code that duplicated
        # (an older variant of) the network below.
        cnn = Sequential()

        # (filters, stride) of the fixed leading conv layers.
        for n_filters, stride in [(32, 2), (64, 1), (128, 2), (256, 1)]:
            cnn.add(
                ConvSN2D(n_filters,
                         kernel_size=3,
                         strides=stride,
                         kernel_initializer='glorot_uniform',
                         padding='same'))
            cnn.add(LeakyReLU())

        # Keep halving the feature resolution (one stride-2 conv plus one
        # stride-1 conv per halving) until it is no larger than 8.
        feature_resolution = resolution / 4
        print("resolution : " + str(resolution))
        print("feature_resolution: " + str(feature_resolution))
        while feature_resolution > 8:
            print("feature_resolution: " + str(feature_resolution))
            cnn.add(
                ConvSN2D(256,
                         kernel_size=3,
                         strides=2,
                         kernel_initializer='glorot_uniform',
                         padding='same'))
            cnn.add(LeakyReLU())
            cnn.add(
                ConvSN2D(256,
                         kernel_size=3,
                         strides=1,
                         kernel_initializer='glorot_uniform',
                         padding='same'))
            cnn.add(LeakyReLU())
            feature_resolution = feature_resolution / 2

        cnn.add(Flatten())

        features = cnn(image)
        return features
# Example 9
    x = SelfAttention(filters=1024)([base_out, x_f, x_g, x_h])
    y = SelfAttention(filters=1024)([base_out, y_f, y_g, y_h])
    z = SelfAttention(filters=1024)([base_out, z_f, z_g, z_h])
    '''
    tensor_yaw = layers.Input(shape=(14, 14,
                                     1024))  #Input to the parallal stream
    tensor_pitch = layers.Input(shape=(14, 14,
                                       1024))  #Input to the parallal stream
    tensor_roll = layers.Input(shape=(14, 14,
                                      1024))  #Input to the parallal stream
    yaw = resnet_block5_weights(model, tensor_yaw, base_layer)
    pitch = resnet_block5_weights(model, tensor_pitch, base_layer)
    roll = resnet_block5_weights(model, tensor_roll, base_layer)

    x = yaw(base_out)
    x_f = ConvSN2D(256, kernel_size=1, strides=1,
                   padding='same')(x)  # [bs, h, w, c']
    x_g = ConvSN2D(256, kernel_size=1, strides=1,
                   padding='same')(x)  # [bs, h, w, c']
    x_h = ConvSN2D(2048, kernel_size=1, strides=1, padding='same')(x)
    x = SelfAttention(filters=2048)([x, x_f, x_g, x_h])

    p1 = layers.MaxPooling2D(2, strides=2, padding='valid')(x)
    p1 = layers.Reshape((-1, 2048))(p1)
    p2 = layers.MaxPooling2D(3, strides=2, padding='valid')(x)
    p2 = layers.Reshape((-1, 2048))(p2)
    p3 = layers.MaxPooling2D(3, strides=3, padding='valid')(x)
    p3 = layers.Reshape((-1, 2048))(p3)
    p4 = layers.MaxPooling2D(4, strides=2, padding='valid')(x)
    p4 = layers.Reshape((-1, 2048))(p4)
    p5 = layers.MaxPooling2D(4, strides=3, padding='valid')(x)
    p5 = layers.Reshape((-1, 2048))(p5)