コード例 #1
0
def seq_2_seq_att_LSTM(X_embedding, MAX_LEN, num_words,
                EMBEDDING_DIM, LSTM_units, LSTM_dropout):
    """Build a seq2seq model with an LSTM encoder/decoder and dot-product attention.

    :param X_embedding: pretrained embedding matrix used as a frozen
        initializer for the encoder embedding.
    :param MAX_LEN: fixed encoder input sequence length.
    :param num_words: vocabulary size (embedding input_dim and output units).
    :param EMBEDDING_DIM: embedding vector dimensionality.
    :param LSTM_units: hidden size of both encoder and decoder LSTMs.
    :param LSTM_dropout: dropout and recurrent-dropout rate of both LSTMs.
    :return: uncompiled Keras Model taking [encoder_inputs, decoder_inputs]
        and producing per-step softmax distributions over the vocabulary.
    """
    # Encoder
    # Encoder input shape is (batch size, max length)
    encoder_inputs = Input(shape=(MAX_LEN,))
    # Frozen embedding initialized from the pretrained matrix.
    encoder_embedding = Embedding(input_dim=num_words, output_dim=EMBEDDING_DIM, 
                        input_length = MAX_LEN, embeddings_initializer=Constant(X_embedding), 
                        trainable=False)(encoder_inputs)

    # LSTM: return both the full output sequence (for attention) and the
    # final states (to seed the decoder).
    encoder_lstm = LSTM(units=LSTM_units, return_state=True, return_sequences=True, recurrent_dropout=LSTM_dropout, dropout=LSTM_dropout)
    encoder_outputs, state_h, state_c = encoder_lstm(encoder_embedding)
    encoder_states = [state_h, state_c]

    # Decoder: variable-length input, trainable embedding.
    decoder_inputs = Input(shape=(None,))
    decoder_embedding_layer = Embedding(input_dim=num_words, output_dim=EMBEDDING_DIM, trainable=True)
    decoder_embedding = decoder_embedding_layer(decoder_inputs)
    decoder_lstm = LSTM(units=LSTM_units, return_state=True, return_sequences=True, recurrent_dropout=LSTM_dropout, dropout=LSTM_dropout)
    decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)

    # Attention: normalized dot product (cosine similarity) between every
    # decoder step and every encoder step, softmaxed over encoder steps.
    attention_weight = dot([decoder_outputs, encoder_outputs], axes=[2, 2], normalize=True) # cosine similarity
    attention = Activation('softmax')(attention_weight)

    # Context: attention-weighted sum of encoder outputs per decoder step.
    context = dot([attention, encoder_outputs], axes=[2,1])
    decoder_combined_context = concatenate([context, decoder_outputs])

    att_output = TimeDistributed(Dense(64, activation="tanh"))(decoder_combined_context) 
    output = TimeDistributed(Dense(num_words, activation="softmax"))(att_output)
    
    model = Model(inputs=[encoder_inputs,decoder_inputs], outputs=output)

    return model
コード例 #2
0
def test_conditional_dense():
    """ConditionalDense must agree with a 1x1 ConditionalConv2D.

    Both layers get the same deterministic initializer, so a dense layer and
    the equivalent 1x1 conditional convolution should produce (numerically)
    identical outputs for the same features and class labels.
    """
    import numpy as np

    def fixed_normal_init(shape):
        # Reseed every call so both layers receive identical weights.
        np.random.seed(0)
        return np.random.normal(size=shape)

    features = Input((2, ))
    labels = Input((1, ), dtype='int32')
    dense_out = ConditionalDense(number_of_classes=3,
                                 units=2,
                                 use_bias=True,
                                 kernel_initializer=fixed_normal_init,
                                 bias_initializer=fixed_normal_init)([features, labels])
    reshaped = Reshape((1, 1, 2))([features])
    conv_out = ConditionalConv2D(number_of_classes=3,
                                 kernel_size=(1, 1),
                                 filters=2,
                                 padding='valid',
                                 use_bias=True,
                                 kernel_initializer=fixed_normal_init,
                                 bias_initializer=fixed_normal_init)([reshaped, labels])
    model = Model([features, labels], [dense_out, conv_out])

    batch = np.arange(2 * 2).reshape((2, 2))
    batch_labels = np.expand_dims(np.arange(2) % 3, axis=-1)
    dense_res, conv_res = model.predict([batch, batch_labels])
    conv_res = np.squeeze(conv_res, axis=(1, 2))

    assert np.sum(np.abs(dense_res - conv_res)) < 1e-5
コード例 #3
0
def test_conditional_bn():
    """Conditional batch norm at inference should use the moving statistics.

    With moving mean 2, moving variance 2/3, beta 0 and per-class gamma
    (1, 2, 3), inputs of constant 1, 2 and 3 map to roughly -1.22, 0 and 3.67.
    """
    import numpy as np

    def gamma_init(shape):
        # Per-class scale: class k gets gamma = k + 1.
        g = np.empty(shape)
        g[0] = 1
        g[1] = 2
        g[2] = 3
        return g

    K.set_learning_phase(0)  # inference mode: moving statistics are used
    features = Input((2, 2, 1))
    labels = Input((1, ), dtype='int32')
    normalized = ConditinalBatchNormalization(
        3,
        axis=-1,
        gamma_initializer=gamma_init,
        moving_variance_initializer=lambda sh: 0.666666666667 * np.ones(sh),
        beta_initializer='zeros',
        moving_mean_initializer=lambda sh: 2 * np.ones(sh))([features, labels])
    model = Model([features, labels], normalized)

    batch = np.ones((3, 2, 2, 1))
    batch[1] = batch[1] * 2
    batch[2] = batch[2] * 3

    batch_labels = np.expand_dims(np.arange(3), axis=-1)
    result = np.squeeze(model.predict([batch, batch_labels]))

    # gamma[cls] * (x - 2) / sqrt(2/3), beta == 0.
    assert np.all(np.abs(result[0] + 1.22) < 0.1)
    assert np.all(np.abs(result[1] - 0) < 0.1)
    assert np.all(np.abs(result[2] - 3.67) < 0.1)
コード例 #4
0
def test_sn_conditional_dense_with_renorm():
    """Power iteration in SNCondtionalDense should converge to the top singular value."""
    import numpy as np
    from numpy.linalg import svd

    def fixed_normal_init(shape):
        np.random.seed(0)
        return np.random.normal(size=shape)

    features = Input((4, ))
    labels = Input((1, ), dtype='int32')
    sn_out = SNCondtionalDense(number_of_classes=3,
                               units=10,
                               kernel_initializer=fixed_normal_init,
                               stateful=True,
                               renormalize=True)([features, labels])
    model = Model([features, labels], [sn_out])
    batch = np.arange(5 * 4).reshape((5, 4))
    batch_labels = (np.arange(5) % 3)[:, np.newaxis]

    # Each predict advances the stateful power iteration on u.
    for _ in range(100):
        model.predict([batch, batch_labels])

    kernel = K.get_value(model.layers[2].kernel)
    u_val = K.get_value(model.layers[2].u)

    # Flatten to 2-D so SVD yields the spectral norm directly.
    kernel = kernel.reshape((-1, kernel.shape[-1]))
    _, singular_values, _ = svd(kernel)

    w_ph = K.placeholder(kernel.shape)
    u_ph = K.placeholder(u_val.shape)
    estimate = K.function([w_ph, u_ph], [max_singular_val(w_ph, u_ph)[0]])

    assert np.abs(estimate([kernel, u_val]) - singular_values[0])[0] < 1e-5
コード例 #5
0
def test_conditional_center_scale():
    """ConditionalCenterScale computes gamma[cls] * x + beta[cls] per sample."""
    import numpy as np

    def ramp_init(shape):
        # Class k gets parameter value k + 1 (for both gamma and beta).
        params = np.empty(shape)
        params[0] = 1
        params[1] = 2
        params[2] = 3
        return params

    features = Input((2, 2, 1))
    labels = Input((1, ), dtype='int32')
    model = Model([features, labels],
                  ConditionalCenterScale(3,
                                         axis=-1,
                                         gamma_initializer=ramp_init,
                                         beta_initializer=ramp_init)([features, labels]))
    batch = np.ones((3, 2, 2, 1))
    batch_labels = np.expand_dims(np.arange(3), axis=-1)
    result = model.predict([batch, batch_labels])

    # For x == 1 the output is gamma + beta = 2 * (cls + 1).
    assert np.all(result[0] == 2)
    assert np.all(result[1] == 4)
    assert np.all(result[2] == 6)
コード例 #6
0
def test_conditional_conv11():
    """ConditionalConv11 should match a 1x1 ConditionalConv2D on the same weights."""
    import numpy as np

    def fixed_normal_init(shape):
        # Reseed every call so both layers start from identical weights.
        np.random.seed(0)
        return np.random.normal(size=shape)

    features = Input(batch_shape=(10, 10, 10, 10))
    labels = Input(batch_shape=(10, 1), dtype='int32')
    conv11_out = ConditionalConv11(number_of_classes=3,
                                   filters=20,
                                   kernel_initializer=fixed_normal_init,
                                   bias_initializer=fixed_normal_init)([features, labels])
    conv2d_out = ConditionalConv2D(number_of_classes=3,
                                   kernel_size=(1, 1),
                                   filters=20,
                                   padding='valid',
                                   use_bias=True,
                                   kernel_initializer=fixed_normal_init,
                                   bias_initializer=fixed_normal_init)([features, labels])
    model = Model([features, labels], [conv11_out, conv2d_out])
    batch = np.arange(10 * 10 * 10 * 10).reshape((10, 10, 10, 10))
    batch_labels = np.expand_dims(np.arange(10) % 3, axis=-1)
    out_a, out_b = model.predict([batch, batch_labels])

    # Inputs are large, so allow a loose float tolerance.
    assert np.mean(np.abs(out_a - out_b)) < 1e-2
コード例 #7
0
def test_conditional_conv():
    """Each class must use its own kernel; sample 2 reuses the class-0 kernel."""
    import numpy as np

    def ramp_init(shape):
        # Class k gets constant weights k + 1 (kernel and bias).
        params = np.empty(shape)
        params[0] = 1
        params[1] = 2
        params[2] = 3
        return params

    features = Input((2, 2, 1))
    labels = Input((1, ), dtype='int32')
    model = Model([features, labels],
                  ConditionalConv2D(number_of_classes=3,
                                    filters=1,
                                    kernel_size=(3, 3),
                                    padding='same',
                                    kernel_initializer=ramp_init,
                                    bias_initializer=ramp_init)([features, labels]))
    batch = np.ones((3, 2, 2, 1))
    batch_labels = np.expand_dims(np.arange(3), axis=-1)
    batch_labels[2] = 0  # third sample deliberately reuses class 0
    result = model.predict([batch, batch_labels])

    # A 3x3 'same' conv over a 2x2 field of ones sums 4 kernel taps + bias:
    # class 0 -> 4*1 + 1 = 5, class 1 -> 4*2 + 2 = 10.
    assert np.all(result[0] == 5)
    assert np.all(result[1] == 10)
    assert np.all(result[2] == 5)
コード例 #8
0
def test_triangular_conv11():
    """Triangular ConditionalConv11 masks the channel-mixing matrix to be triangular."""
    import numpy as np

    def channel_init(shape):
        # Output channel 0 weights are 1, output channel 1 weights are 2.
        kernel = np.empty(shape)
        kernel[..., 0] = 1
        kernel[..., 1] = 2
        return kernel

    features = Input((2, 2, 2))
    labels = Input((1, ), dtype='int32')
    model = Model([features, labels],
                  ConditionalConv11(number_of_classes=1,
                                    filters=2,
                                    triangular=True,
                                    kernel_initializer=channel_init,
                                    bias_initializer='zeros')([features, labels]))
    batch = np.ones((1, 2, 2, 2))
    batch_labels = np.expand_dims(np.arange(1), axis=-1)
    batch_labels[:] = 0
    result = model.predict([batch, batch_labels])
    # With ones input: channel 0 sees one input channel (1*1 = 1),
    # channel 1 sees both (2*1 + 2*1 = 4).
    assert np.all(result[0, ..., 0] == 1)
    assert np.all(result[0, ..., 1] == 4)
コード例 #9
0
def test_triangular_factorized_conv11():
    """FactorizedConv11 with a rank-1 class embedding yields constant output here."""
    import numpy as np

    def ramp_init(shape):
        params = np.empty(shape)
        params[0] = 1
        if shape[0] != 1:
            # Initializer is shared by the embedding (1 row) and the kernel.
            params[1] = 2
        return params

    features = Input((2, 2, 2))
    labels = Input((1, ), dtype='int32')
    model = Model([features, labels],
                  FactorizedConv11(number_of_classes=2,
                                   filters=2,
                                   filters_emb=1,
                                   kernel_initializer=ramp_init,
                                   bias_initializer='zeros')([features, labels]))
    batch = np.ones((2, 2, 2, 2))
    batch_labels = np.expand_dims(np.arange(2), axis=-1)
    result = model.predict([batch, batch_labels])
    # Both classes share the rank-1 factorization, so outputs are equal.
    assert np.all(result[0] == 2)
    assert np.all(result[1] == 2)
コード例 #10
0
def test_sn_depthwise_with_renorm():
    """SNConditionalDepthwiseConv2D power iteration should find the top singular value."""
    import numpy as np
    from numpy.linalg import svd

    def normal_init(shape):
        return np.random.normal(size=shape)

    features = Input((2, 3, 4))
    labels = Input((1, ), dtype='int32')
    sn_out = SNConditionalDepthwiseConv2D(number_of_classes=3,
                                          kernel_size=(3, 3),
                                          padding='same',
                                          filters=4,
                                          kernel_initializer=normal_init,
                                          stateful=True)([features, labels])
    model = Model([features, labels], [sn_out])
    batch = np.arange(5 * 2 * 3 * 4).reshape((5, 2, 3, 4))
    batch_labels = np.zeros(shape=(5, 1))
    # Each predict advances the stateful power iteration on u.
    for _ in range(100):
        model.predict([batch, batch_labels])

    kernel = K.get_value(model.layers[2].kernel)
    u_val = K.get_value(model.layers[2].u)

    # Flatten to 2-D so SVD yields the spectral norm directly.
    kernel = kernel.reshape((-1, kernel.shape[3]))
    _, singular_values, _ = svd(kernel)

    w_ph = K.placeholder(kernel.shape)
    u_ph = K.placeholder(u_val.shape)
    estimate = K.function([w_ph, u_ph], [max_singular_val(w_ph, u_ph)[0]])

    assert np.abs(estimate([kernel, u_val]) - singular_values[0])[0] < 1e-5
コード例 #11
0
    def load_autoencoder(self, param, train_images=None):
        """Load a pre-trained convolutional autoencoder, or build and train one.

        Looks for the autoencoder/encoder/decoder model files under
        param's 'directory_models'. If all three exist, loads the full
        autoencoder and reconstructs separate encoder and decoder sub-models
        by re-applying its layers; otherwise builds a fresh autoencoder and
        optionally trains it offline on train_images.

        Sets self.autoencoder, self.encoder and self.decoder.

        :param param: parameter store with a dict-like get() (paths, sizes,
            verbosity, flags).
        :param train_images: training images, required when no pre-trained
            files exist and 'train_cae_offline' is set (exits otherwise).
        """
        cae_file = param.get('directory_models') + param.get('cae_filename')
        e_file = param.get('directory_models') + param.get('encoder_filename')
        d_file = param.get('directory_models') + param.get('decoder_filename')
        #cae_file = './pretrained_models/' + param.get('cae_filename')
        #e_file = './pretrained_models/' + param.get('encoder_filename')
        #d_file = './pretrained_models/' + param.get('decoder_filename')

        self.autoencoder = []
        self.encoder = []
        self.decoder = []
        # if cae file already exists (i.e. cae has been already trained):
        if os.path.isfile(cae_file) and os.path.isfile(
                e_file) and os.path.isfile(d_file):
            # load convolutional autoencoder
            print('Loading existing pre-trained autoencoder: ', cae_file)
            # clear tensorflow graph
            #utils.clear_tensorflow_graph()
            self.autoencoder = load_model(
                cae_file)  # keras.load_model function

            # Create a separate encoder model by re-applying the loaded
            # layers up to (and including) the 'encoded' bottleneck layer.
            encoder_inp = Input(shape=(param.get('image_size'),
                                       param.get('image_size'),
                                       param.get('image_channels')))
            encoder_layer = self.autoencoder.layers[1](encoder_inp)
            enc_layer_idx = utils.getLayerIndexByName(self.autoencoder,
                                                      'encoded')
            for i in range(2, enc_layer_idx + 1):
                encoder_layer = self.autoencoder.layers[i](encoder_layer)
            self.encoder = Model(encoder_inp, encoder_layer)
            if (param.get('verbosity_level') > 2):
                print(self.encoder.summary())
            # Create a separate decoder model from the layers after the
            # bottleneck, fed by a new code-sized input.
            decoder_inp = Input(shape=(param.get('code_size'), ))
            decoder_layer = self.autoencoder.layers[enc_layer_idx +
                                                    1](decoder_inp)
            for i in range(enc_layer_idx + 2, len(self.autoencoder.layers)):
                decoder_layer = self.autoencoder.layers[i](decoder_layer)

            self.decoder = Model(decoder_inp, decoder_layer)
            if (param.get('verbosity_level') > 2):
                print(self.decoder.summary())
            print('Autoencoder loaded')
        else:  # otherwise train a new one
            print(
                'Could not find autoencoder files. Building and training a new one.'
            )
            self.autoencoder, self.encoder, self.decoder = self.build_autoencoder(
                param)
            if param.get('train_cae_offline'):
                if train_images is None:
                    print('I need some images to train the autoencoder')
                    sys.exit(1)
                self.train_autoencoder_offline(train_images, param)
コード例 #12
0
def define_discriminator(image_shape):
    """Build and compile the pix2pix PatchGAN discriminator.

    Takes a source image and a target image, concatenates them channel-wise,
    and outputs a sigmoid patch map of real/fake scores. Compiled with
    binary cross-entropy and a 0.5 loss weight.
    """
    weight_init = RandomNormal(stddev=0.02)

    src_image = Input(shape=image_shape)
    target_image = Input(shape=image_shape)
    x = Concatenate()([src_image, target_image])

    # C64: first downsampling block has no batch norm.
    x = Conv2D(64, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=weight_init)(x)
    x = LeakyReLU(alpha=0.2)(x)

    # C128-C256-C512: strided conv + batch norm + leaky ReLU.
    for n_filters in (128, 256, 512):
        x = Conv2D(n_filters, (4, 4),
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=weight_init)(x)
        x = BatchNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)

    # Second-to-last block keeps spatial resolution (stride 1).
    x = Conv2D(512, (4, 4), padding='same', kernel_initializer=weight_init)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)

    # One sigmoid score per receptive-field patch.
    x = Conv2D(1, (4, 4), padding='same', kernel_initializer=weight_init)(x)
    patch_out = Activation('sigmoid')(x)

    model = Model([src_image, target_image], patch_out)
    # Halving the loss slows the discriminator relative to the generator.
    opt = Adam(lr=0.0002, beta_1=0.5)
    model.compile(loss='binary_crossentropy',
                  optimizer=opt,
                  loss_weights=[0.5])
    return model
コード例 #13
0
def make_discriminator(image_size, number_of_classes):
    """Build a two-headed discriminator: real/fake score and class logits."""
    rgb_input = Input(image_size + (3, ))
    mask_input = Input(image_size + (1, ))

    # Shared trunk over the channel-concatenated inputs.
    features = Concatenate(axis=-1)([rgb_input, mask_input])
    features = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(features)
    features = block(features, 128)
    features = block(features, 256)

    # Head 1: real-vs-fake (final block without batch norm).
    real_vs_fake = block(features, 1, bn=False)
    real_vs_fake = Flatten()(real_vs_fake)

    # Head 2: class logits from the shared trunk features.
    cls_logits = Flatten()(features)
    cls_logits = Dense(128, activation='relu')(cls_logits)
    cls_logits = Dense(number_of_classes)(cls_logits)

    return Model(inputs=[rgb_input, mask_input], outputs=[real_vs_fake, cls_logits])
コード例 #14
0
def seq_2_seq_biLSTM_att(X_embedding, MAX_LEN, num_words,
                EMBEDDING_DIM, LSTM_units, LSTM_dropout):
    """Build a seq2seq model with a bidirectional LSTM encoder and attention.

    The encoder is a BiLSTM whose forward/backward states are concatenated
    to seed a decoder LSTM of size 2*LSTM_units; dot-product attention over
    the encoder outputs feeds a per-step softmax over the vocabulary.
    Shape comments below use ? for the batch dimension.

    :param X_embedding: pretrained embedding matrix (frozen encoder init).
    :param MAX_LEN: fixed encoder input sequence length.
    :param num_words: vocabulary size.
    :param EMBEDDING_DIM: embedding vector dimensionality.
    :param LSTM_units: per-direction encoder LSTM size.
    :param LSTM_dropout: dropout and recurrent-dropout rate for all LSTMs.
    :return: uncompiled Keras Model taking [encoder_inputs, decoder_inputs].
    """
    # Encoder
    # [?, 100]
    encoder_inputs = Input(shape=(MAX_LEN,))

    # [?, 100, 300]
    encoder_embedding = Embedding(input_dim=num_words, output_dim=EMBEDDING_DIM, 
                        input_length = MAX_LEN, embeddings_initializer=Constant(X_embedding), 
                        trainable=False)(encoder_inputs)

    # LSTM
    
    encoder_lstm = Bidirectional(LSTM(units=LSTM_units, return_state=True, return_sequences=True, recurrent_dropout=LSTM_dropout, dropout=LSTM_dropout))
    # [?, 100, 300]
    encoder_outputs, forward_h, forward_c, backward_h, backward_c = encoder_lstm(encoder_embedding)
    # [?, 300] — concatenate forward and backward states for the decoder.
    state_h = concatenate([forward_h, backward_h])
    state_c = concatenate([forward_c, backward_c])
    encoder_states = [state_h, state_c]

    # Decoder
    # [?, 30]
    decoder_inputs = Input(shape=(None,))
    decoder_embedding_layer = Embedding(input_dim=num_words, output_dim=EMBEDDING_DIM, trainable=True)
    # [?, 30, 300]
    decoder_embedding = decoder_embedding_layer(decoder_inputs)
    # Decoder size is 2*LSTM_units so the concatenated BiLSTM states fit.
    decoder_lstm = LSTM(units=2*LSTM_units, return_state=True, return_sequences=True, recurrent_dropout=LSTM_dropout, dropout=LSTM_dropout)
    # [?, 30, 300]
    decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
    # [?, 30, 100] — similarity of each decoder step with each encoder step.
    attention_weight = dot([decoder_outputs, encoder_outputs], axes=[2, 2])
    attention = Activation('softmax')(attention_weight)

    # [?, 30, 300]
    context = dot([attention, encoder_outputs], axes=[2,1]) #[?, 100, 300] = dot([?,?,100] , [?, 100, 300])
    
    # [?, 30, 600]
    decoder_combined_context = concatenate([context, decoder_outputs])

    # [?, 30, 128]
    att_output = TimeDistributed(Dense(128, activation="tanh"))(decoder_combined_context) 
    # [?, 30, 39093]
    output = TimeDistributed(Dense(num_words, activation="softmax"))(att_output)
    
    model = Model(inputs=[encoder_inputs,decoder_inputs], outputs=output)

    return model
コード例 #15
0
    def build_forward_code_model(self, param):
        """Build and compile the forward model: motor command -> latent code.

        A small MLP mapping a 'romi_input_dim' command through two ReLU
        layers to a sigmoid code of size 'code_size', compiled with
        Adadelta and MSE loss.
        """
        print('building forward code model...')

        cmd_fwd_inp = Input(shape=(param.get('romi_input_dim'), ),
                            name='fwd_input')
        hidden = Dense(param.get('code_size'), activation='relu')(cmd_fwd_inp)
        hidden = Dense(param.get('code_size') * 10, activation='relu')(hidden)

        # Sigmoid keeps the predicted code in [0, 1].
        code = Dense(param.get('code_size'),
                     activation='sigmoid',
                     name='output')(hidden)

        fwd_model = Model(cmd_fwd_inp, code)
        fwd_model.compile(optimizer='adadelta', loss='mean_squared_error')
        if param.get('verbosity_level') > 2:
            print('forward model')
            fwd_model.summary()
        return fwd_model
コード例 #16
0
ファイル: HD-Net.py プロジェクト: bacterous/HD-Net_keras
def HD_Net(input_shape, classes, levels, modules, filters, dilations):
    """
    Builds a custom HD_Net model.
    :param input_shape: input shape, tuple, (channels, depth, rows, cols)
    :param classes: segmentation region types, int
    :param levels: number of hierarchical layers, int
    :param modules: number of residual dilated convolutional block in each hierarchical layer, array like, len(modules)==levels
    :param filters: filters in each hierarchical layer, array like, len(filters)==levels
    :param dilations: dilation rate of each residual dilated convolutional block, array like, dilations.shape=(levels, modules)
    :return: segmentation result, tensor, (batch, classes, depth, rows, cols)
    """
    # 'inputs' rather than 'input' so the Python builtin is not shadowed.
    inputs = Input(shape=input_shape)

    level_outputs = []
    x_pre = inputs

    for n_level, n_module, n_filter, dilation in zip(np.arange(levels),
                                                     modules, filters,
                                                     dilations):
        # Each hierarchical layer transforms the carry tensor and emits a
        # per-level prediction that is kept for later fusion.
        x_pre, output = hierarchical_layer(n_level, n_filter, classes,
                                           n_module, dilation)(x_pre)
        print('level:', n_level, 'output:', output.shape, 'x_pre:',
              x_pre.shape)
        level_outputs.append(output)

    # Fuse the per-level predictions along the channel axis.
    x = concatenate(level_outputs, axis=1)
    x = Lambda(fusion(32, classes))(x)
    model = Model(inputs, x)

    return model
コード例 #17
0
def demo_create_encoder(latent_dim, cat_dim, window_size, input_dim):
    """Build a demo sequence autoencoder with categorical and latent heads.

    Returns a Model mapping (window_size, input_dim) sequences to
    [reconstruction, latent representation, category softmax, residual error].
    """
    seq_input = Input(shape=(window_size, input_dim))

    # Encoder: per-step projection, then two BiLSTM stages down to a vector.
    encoded = TimeDistributed(Dense(64, activation='linear'))(seq_input)
    encoded = Bidirectional(LSTM(128, return_sequences=True))(encoded)
    encoded = BatchNormalization()(encoded)
    encoded = ELU()(encoded)
    encoded = Bidirectional(LSTM(64))(encoded)
    encoded = BatchNormalization()(encoded)
    encoded = ELU()(encoded)

    # Categorical head: softmax over cat_dim classes.
    cat = Dense(64)(encoded)
    cat = BatchNormalization()(cat)
    cat = PReLU()(cat)
    cat = Dense(cat_dim, activation='softmax')(cat)

    # Continuous latent head.
    latent_repr = Dense(64)(encoded)
    latent_repr = BatchNormalization()(latent_repr)
    latent_repr = PReLU()(latent_repr)
    latent_repr = Dense(latent_dim, activation='linear')(latent_repr)

    # Decoder: expand [latent, category] back to the input sequence shape.
    decoded = Concatenate()([latent_repr, cat])
    decoded = RepeatVector(window_size)(decoded)
    decoded = Bidirectional(LSTM(64, return_sequences=True))(decoded)
    decoded = ELU()(decoded)
    decoded = Bidirectional(LSTM(128, return_sequences=True))(decoded)
    decoded = ELU()(decoded)
    decoded = TimeDistributed(Dense(64))(decoded)
    decoded = ELU()(decoded)
    decoded = TimeDistributed(Dense(input_dim, activation='linear'))(decoded)

    # Reconstruction residual, exposed as an output (e.g. anomaly signal).
    error = Subtract()([seq_input, decoded])

    return Model(seq_input, [decoded, latent_repr, cat, error])
コード例 #18
0
def test_sn_conv2D():
    """SNConv2D's power-iteration estimate should match the top singular value."""
    import numpy as np
    from numpy.linalg import svd

    def normal_init(shape):
        return np.random.normal(size=shape)

    features = Input((2, 3, 4))
    sn_out = SNConv2D(kernel_size=(3, 3),
                      padding='same',
                      filters=10,
                      kernel_initializer=normal_init,
                      stateful=True)(features)
    model = Model([features], [sn_out])
    batch = np.arange(5 * 2 * 3 * 4).reshape((5, 2, 3, 4))
    # Each predict advances the stateful power iteration on u.
    for _ in range(100):
        model.predict([batch])

    kernel = K.get_value(model.layers[1].kernel)
    u_val = K.get_value(model.layers[1].u)

    # Flatten to 2-D so SVD yields the spectral norm directly.
    kernel = kernel.reshape((-1, kernel.shape[3]))
    _, singular_values, _ = svd(kernel)

    w_ph = K.placeholder(kernel.shape)
    u_ph = K.placeholder(u_val.shape)
    estimate = K.function([w_ph, u_ph], [max_singular_val(w_ph, u_ph)[0]])

    assert np.abs(estimate([kernel, u_val]) - singular_values[0])[0] < 1e-5
コード例 #19
0
    def build_inverse_code_model(self, param):
        """Build and compile the inverse model: latent code -> motor command.

        A linear MLP (no hidden activations) with light dropout, compiled
        with Adadelta and MSE loss.
        """
        print('building inverse code model...')

        input_code = Input(shape=(param.get('code_size'), ), name='inv_input')

        # Linear stack with dropout between layers.
        x = Dense(param.get('code_size'))(input_code)
        x = Dropout(0.1)(x)
        x = Dense(param.get('code_size') * 10)(x)
        x = Dropout(0.1)(x)
        x = Dense(param.get('code_size') * 10)(x)

        # Linear output: unconstrained motor command.
        command = Dense(param.get('romi_input_dim'), name='command')(x)

        inv_model = Model(input_code, command)
        inv_model.compile(optimizer='adadelta', loss='mean_squared_error')
        if param.get('verbosity_level') > 2:
            print('inverse code model')
            inv_model.summary()
        return inv_model
コード例 #20
0
 def __init__(self, image_size, l1_weigh_penalty=100, **kwargs):
     """Set up CGAN-specific inputs before delegating to the base GAN.

     :param image_size: spatial size tuple (rows, cols); 3 RGB channels
         are appended for the ground-truth placeholder.
     :param l1_weigh_penalty: weight of the L1 reconstruction penalty.
         NOTE(review): the parameter name keeps the original 'weigh'
         spelling for backward compatibility with existing callers.
     """
     # Placeholder for the ground-truth image fed during generator training.
     self.gt_image_placeholder = Input(image_size + (3, ))
     super(CGAN, self).__init__(**kwargs)
     self.l1_weight_penalty = l1_weigh_penalty
     self.additional_inputs_for_generator_train = [
         self.gt_image_placeholder
     ]
コード例 #21
0
ファイル: unet.py プロジェクト: GustavoChichanoski/Covid3D
 def model(self) -> Model:
     """Lazily build and cache the U-Net model.

     Returns the cached Keras Model, constructing the encoder/decoder
     pyramid on first access.
     """
     if self._lazy_model is not None:
         return self._lazy_model

     conv_params = {'kernel_size': (3, 3), 'activation': self.activation}
     skip_connections = {}
     inputs = Input(self.input_size)
     layer = inputs

     # Contracting path: conv block, then pooling (+ optional dropout)
     # everywhere except the bottom level.
     for depth in range(self.depth):
         layer = unet_conv(
             layer=layer,
             filters=(2 ** depth) * self.filter_root,
             depth=depth,
             name="Down",
             **conv_params
         )
         if depth < self.depth - 1:
             skip_connections[str(depth)] = layer
             layer = MaxPooling2D(
                 pool_size=(2, 2),
                 padding='same',
                 name=f'MaxPooling{depth}_0'
             )(layer)
             if self.dropout > 0:
                 layer = Dropout(
                     rate=self.dropout, name=f'Down_Drop_{depth}'
                 )(layer)

     # Expanding path: upsample + merge the stored skip connection,
     # optional dropout, then a conv block.
     for depth in range(self.depth - 2, -1, -1):
         layer = up_conct(
             layer=layer,
             connection=skip_connections[str(depth)],
             depth=self.depth - depth
         )
         if self.dropout > 0:
             layer = Dropout(
                 rate=self.dropout, name=f'Up_Drop_{depth}'
             )(layer)
         layer = unet_conv(
             layer=layer,
             filters=(2 ** depth) * self.filter_root,
             depth=self.depth - depth,
             name="Up",
             **conv_params
         )

     layer = Dropout(0.33, name='Drop_1')(layer)
     outputs = Conv2D(
         filters=self.n_class,
         kernel_size=(1, 1),
         padding='same',
         activation=self.final_activation,
         name='output'
     )(layer)
     self._lazy_model = Model(inputs, outputs, name="UNet")
     return self._lazy_model
コード例 #22
0
def test_sn_emb():
    """SNEmbeding's spectral-norm estimate should match the top singular value."""
    import numpy as np
    from numpy.linalg import svd

    def normal_init(shape):
        return np.random.normal(size=shape)

    np.random.seed(0)
    ids = Input((1, ), dtype='int32')
    emb_out = SNEmbeding(input_dim=10,
                         output_dim=10,
                         embeddings_initializer=normal_init,
                         stateful=True)(ids)
    model = Model([ids], [emb_out])
    id_batch = (np.arange(5) % 3)[:, np.newaxis]
    # Each predict advances the stateful power iteration on u.
    for _ in range(100):
        model.predict([id_batch])
    embeddings = K.get_value(model.layers[1].embeddings)
    u_val = K.get_value(model.layers[1].u)

    _, singular_values, _ = svd(embeddings)

    w_ph = K.placeholder(embeddings.shape)
    u_ph = K.placeholder(u_val.shape)
    estimate = K.function([w_ph, u_ph], [max_singular_val(w_ph, u_ph)[0]])
    assert np.abs(estimate([embeddings, u_val]) - singular_values[0])[0] < 1e-5
コード例 #23
0
def test_sn_dense():
    """SNDense power iteration should converge to the kernel's top singular value."""
    import numpy as np
    from numpy.linalg import svd

    def normal_init(shape):
        return np.random.normal(size=shape)

    features = Input((5, ))
    sn_out = SNDense(units=10, kernel_initializer=normal_init, stateful=True)(features)
    model = Model([features], [sn_out])
    batch = np.arange(5 * 10).reshape((10, 5))
    # Each predict advances the stateful power iteration on u.
    for _ in range(50):
        model.predict([batch])

    kernel = K.get_value(model.layers[1].kernel)
    u_val = K.get_value(model.layers[1].u)

    _, singular_values, _ = svd(kernel)

    w_ph = K.placeholder(kernel.shape)
    u_ph = K.placeholder(u_val.shape)
    estimate = K.function([w_ph, u_ph], [max_singular_val(w_ph, u_ph)[0]])

    assert np.abs(estimate([kernel, u_val]) - singular_values[0])[0] < 1e-5
コード例 #24
0
def test_decorelation():
    """DecorelationNormalization should whiten correlated inputs.

    Samples are drawn from a strongly-correlated 64-dim Gaussian; after the
    layer the empirical covariance of the outputs should be close to the
    identity matrix.
    """
    import numpy as np

    # (The original test defined an unused beta_init helper; removed.)
    K.set_learning_phase(1)  # training mode so batch statistics are used
    inp = Input((10, 10, 64))
    decor_l = DecorelationNormalization(renorm=False)
    decor = decor_l(inp)
    decor_l.stateful = True
    out = decor

    m = Model([inp], [out])

    # 0.5 on the diagonal plus 0.5 everywhere: every pair of features is
    # correlated, so whitening has real work to do.
    cov = 0.5 * np.eye(64) + 0.5 * np.ones((64, 64))

    x = np.random.multivariate_normal(mean=np.ones(64),
                                      cov=cov,
                                      size=(10, 10, 10))
    out = m.predict(x)

    out = np.reshape(out, [-1, out.shape[-1]])
    # Whitened features should have (approximately) identity covariance.
    assert (np.mean(np.abs(np.cov(out, rowvar=False) - np.eye(64))) < 1e-3)
コード例 #25
0
def demo_create_discriminator(latent_dim):
    """Build a small MLP that scores latent vectors as real or fake."""
    latent_input = Input(shape=(latent_dim,))
    score = Dense(128)(latent_input)
    score = ELU()(score)
    score = Dense(64)(score)
    score = ELU()(score)
    # Single sigmoid output: probability the latent vector is "real".
    score = Dense(1, activation="sigmoid")(score)
    return Model(latent_input, score)
コード例 #26
0
 def mseG(self, g_model):
     """Wrap the generator in a model compiled with plain MSE loss.

     :param g_model: generator model to wrap; its weights are shared, so
         training this wrapper trains the generator.
     :return: compiled Model mapping conditioning image -> generated image.
     """
     cond_image = Input(shape = self.input_shape)
     # connect the source image to the generator input
     g_out = g_model(cond_image)
     # src image as input, generated image and classification output
     model = Model(cond_image, g_out)
     # compile model with a pixel-wise mean-squared-error objective only
     opt = Adam(lr=0.0002, beta_1=0.5)
     model.compile(loss=[ 'mse'], optimizer=opt)
     return model
コード例 #27
0
    def generator(self):
        """Build the encoder-decoder generator network.

        A stack of tanh-activated convolutions (two of them strided for
        downsampling) followed by a mirrored stack of transposed
        convolutions (three strided for upsampling), ending in a single
        1-channel tanh output. Returns an uncompiled Model.
        """
        # weight initialization
        init = RandomNormal(stddev=0.02)
        # image input
        in_image = Input(shape=self.input_shape)
        # large receptive field on the first layer
        g = Conv2D(self.n_filters, (11,11), padding='same', kernel_initializer=init)(in_image)
        g = Activation('tanh')(g)

        # downsample by 2
        g = Conv2D(2*self.n_filters, (7,7), strides=(2,2), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2D(4*self.n_filters, (5,5), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2D(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        # downsample by 2 again
        g = Conv2D(4*self.n_filters, (5,5), strides=(2,2), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2D(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2D(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        # residual blocks currently disabled
        #for _ in range(self.n_resblocks):
        #    g = self.resnet_block(g)

        g = Conv2DTranspose(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2DTranspose(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        # upsample by 2
        g = Conv2DTranspose(4*self.n_filters, (5,5), strides=(2,2), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2DTranspose(4*self.n_filters, (3,3), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        g = Conv2DTranspose(4*self.n_filters, (5,5), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        # upsample by 2 again
        g = Conv2DTranspose(2*self.n_filters, (7,7), strides=(2,2), padding='same', kernel_initializer=init)(g)
        g = Activation('tanh')(g)

        # final projection to a single output channel
        g = Conv2D(1, (11,11), padding='same', kernel_initializer=init)(g)
        #g = InstanceNormalization(axis=-1)(g)
        out_image = Activation('tanh')(g)
        # define model
        model = Model(in_image, out_image)
        return model
コード例 #28
0
    def getModel(self, nwords, nchars, ntags, max_len, max_len_char,
                 embedding_matrix):
        """Build and compile a BiLSTM sequence tagger with char-level features.

        Word embeddings (frozen, from embedding_matrix) are concatenated
        with per-word character-LSTM encodings, passed through a BiLSTM,
        and mapped to a per-token softmax over ntags + 1 labels. Compiled
        with Adam and sparse categorical cross-entropy.

        :param nwords: vocabulary size (words).
        :param nchars: alphabet size (characters).
        :param ntags: number of tag labels.
        :param max_len: sentence length in words.
        :param max_len_char: word length in characters.
        :param embedding_matrix: pretrained word-embedding matrix; its row
            width sets the embedding dimension.
        :return: compiled Keras Model taking [word_ids, char_ids].
        """
        word_in = Input(shape=(max_len, ))

        # Frozen pretrained word embeddings.
        emb_word = Embedding(nwords + 1,
                             len(embedding_matrix[0]),
                             weights=[embedding_matrix],
                             input_length=max_len,
                             trainable=False)(word_in)

        # input and embeddings for characters
        char_in = Input(shape=(
            max_len,
            max_len_char,
        ))
        emb_char = TimeDistributed(
            Embedding(input_dim=nchars + 2,
                      output_dim=10,
                      input_length=max_len_char,
                      mask_zero=True))(char_in)

        # character LSTM to get word encodings by characters
        char_enc = TimeDistributed(
            LSTM(units=20, return_sequences=False,
                 recurrent_dropout=0.68))(emb_char)

        # Merge word and char features; SpatialDropout drops whole feature
        # channels rather than individual elements.
        x = concatenate([emb_word, char_enc])
        x = SpatialDropout1D(0.3)(x)
        main_lstm = Bidirectional(
            LSTM(units=50, return_sequences=True, recurrent_dropout=0.68))(x)
        out = TimeDistributed(Dense(ntags + 1,
                                    activation="softmax"))(main_lstm)

        model = Model([word_in, char_in], out)
        model.compile(optimizer="adam",
                      loss="sparse_categorical_crossentropy",
                      metrics=["acc"])

        model.summary()

        return model
コード例 #29
0
def content_features_model(image_size, layer_name='block4_conv1'):
    """Return a model that outputs VGG19 content features for image batches.

    :param image_size: (rows, cols) of the input images; 3 RGB channels are
        appended. NOTE: previously this argument was ignored — the input
        was created from it and then immediately overwritten by a
        hard-coded Input((128, 64, 3)); it is now honored.
    :param layer_name: a VGG19 layer name, or a list of names, whose
        activations become the model output(s).
    :return: Model mapping images in [-1, 1] to the requested activations.
    """
    from tensorflow.python.keras.applications import vgg19

    def preprocess_for_vgg(x):
        # Map [-1, 1] images to 0..255, subtract the ImageNet BGR mean,
        # and flip RGB -> BGR as vgg19 expects.
        x = 255 * (x + 1) / 2
        mean = np.array([103.939, 116.779, 123.68])
        mean = mean.reshape((1, 1, 1, 3))
        x = x - mean
        x = x[..., ::-1]
        return x

    x = Input(list(image_size) + [3])
    y = Lambda(preprocess_for_vgg)(x)
    vgg = vgg19.VGG19(weights='imagenet', include_top=False, input_tensor=y)
    outputs_dict = dict([(layer.name, layer.output) for layer in vgg.layers])
    if isinstance(layer_name, list):
        y = [outputs_dict[ln] for ln in layer_name]
    else:
        y = outputs_dict[layer_name]
    return Model(inputs=x, outputs=y)
コード例 #30
0
 def discriminator(self):
     """Build and compile the conditional PatchGAN discriminator.

     Concatenates the conditioning image and the candidate image and maps
     them to a sigmoid patch map of real/fake scores; compiled with binary
     cross-entropy.
     """
     weight_init = RandomNormal(stddev=0.02)
     # Conditioning (source) and candidate (target) images share a shape.
     cond_image = Input(shape=self.input_shape)
     y_image = Input(shape=self.input_shape)
     x = Concatenate()([cond_image, y_image])
     # Two strided conv blocks shrink the patch map.
     x = Conv2D(self.n_filters, (4,4), strides=(3,3), padding='same',
                kernel_initializer=weight_init)(x)
     x = LeakyReLU(alpha=0.2)(x)
     x = Conv2D(4*self.n_filters, (4,4), strides=(2,2), padding='same',
                kernel_initializer=weight_init)(x)
     x = LeakyReLU(alpha=0.2)(x)
     # One sigmoid score per patch.
     x = Conv2D(1, (4,4), padding='same', kernel_initializer=weight_init)(x)
     patch_out = Activation('sigmoid')(x)
     model = Model([cond_image, y_image], patch_out)
     opt = Adam(lr=0.0001, beta_1=0.5)
     model.compile(loss='binary_crossentropy', optimizer=opt)
     return model