Code example #1
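"""The snippet below reuses one Embedding layer across two inputs. A minimal setup for it (the vocabulary size, embedding width, and input shapes here are assumptions):"""

from tensorflow import keras
from tensorflow.keras import layers

# Variable-length sequences of integer word indices
text_input_a = keras.Input(shape=(None,), dtype='int32')
text_input_b = keras.Input(shape=(None,), dtype='int32')

# A single Embedding layer, shared by both inputs below
shared_embedding = layers.Embedding(1000, 128)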
# We reuse the same layer to encode both inputs
encoded_input_a = shared_embedding(text_input_a)
encoded_input_b = shared_embedding(text_input_b)
"""## Extracting and reusing nodes in the graph of layers

Because the graph of layers you are manipulating in the Functional API is a static data structure, it can be accessed and inspected. This is how we are able to plot Functional models as images, for instance.

This also means that we can access the activations of intermediate layers ("nodes" in the graph) and reuse them elsewhere. This is extremely useful for feature extraction, for example!

Let's look at an example. This is a VGG19 model with weights pre-trained on ImageNet:
"""

import numpy as np
from tensorflow import keras
from tensorflow.keras.applications import VGG19

vgg19 = VGG19()
"""And these are the intermediate activations of the model, obtained by querying the graph datastructure:"""

features_list = [layer.output for layer in vgg19.layers]
"""We can use these features to create a new feature-extraction model, that returns the values of the intermediate layer activations -- and we can do all of this in 3 lines."""

feat_extraction_model = keras.Model(inputs=vgg19.input, outputs=features_list)

img = np.random.random((1, 224, 224, 3)).astype('float32')
extracted_features = feat_extraction_model(img)
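"""If you only need one intermediate layer, you can also query the graph by layer name. A short illustrative variant (block4_pool is a real VGG19 layer, but this particular choice is just an example):"""

block4_pool_model = keras.Model(
    inputs=vgg19.input,
    outputs=vgg19.get_layer('block4_pool').output)
block4_pool_features = block4_pool_model(img)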
"""This comes in handy when [implementing neural style transfer](https://medium.com/tensorflow/neural-style-transfer-creating-art-with-deep-learning-using-tf-keras-and-eager-execution-7d541ac31398), among other things.

## Extending the API by writing custom layers

tf.keras includes a wide range of built-in layers, but if you don't find what you need, it's easy to extend the API by creating your own layers.
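"""As a hedged sketch of what that looks like (the class name, sizes, and initializers here are illustrative): a custom layer subclasses keras.layers.Layer, creates its weights in build, and defines the forward pass in call."""

import tensorflow as tf
from tensorflow.keras import layers

class CustomDense(layers.Layer):
    def __init__(self, units=32):
        super(CustomDense, self).__init__()
        self.units = units

    def build(self, input_shape):
        # Create the weights once the input shape is known
        self.w = self.add_weight(shape=(input_shape[-1], self.units),
                                 initializer='random_normal',
                                 trainable=True)
        self.b = self.add_weight(shape=(self.units,),
                                 initializer='zeros',
                                 trainable=True)

    def call(self, inputs):
        # Affine transform: inputs @ w + b
        return tf.matmul(inputs, self.w) + self.b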
Code example #2
    # This snippet assumes module-level imports along these lines:
    #   from tensorflow.keras.layers import Input
    #   from tensorflow.keras.models import Model
    #   from tensorflow.keras.optimizers import Adam
    #   from tensorflow.keras.applications import VGG19
    # plus a DataLoader helper and a smoothL1 loss defined elsewhere
    def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        self.dataset_name = 'facades'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))


        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)
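        # With 256x256 inputs, 256 / 2**4 = 16, so the discriminator outputs a
        # 16x16 grid of real/fake scores, one per image patch (the PatchGAN
        # idea from Pix2Pix), rather than a single score for the whole image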

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])

        #-------------------------
        # Construct Computational
        #   Graph of Generator
        #-------------------------

        # Build the generator
        self.generator = self.build_generator()

        # Input images and their conditioning images
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # By conditioning on B generate a fake version of A
        fake_A = self.generator(img_B)

        # For the combined model we will only train the generator, so freeze
        # the discriminator's weights here
        self.discriminator.trainable = False

        # The discriminator determines the validity of translated image / condition pairs
        valid = self.discriminator([fake_A, img_B])

        self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
        self.combined.compile(loss=['mse', smoothL1],
                              loss_weights=[1, 100],
                              optimizer=optimizer)
        
        ################# Perceptual loss and L1 loss ######################
        # VGG19 feature extractor for the perceptual loss, loaded from a local
        # weights file (no classification head)
        self.vggmodel = VGG19(weights="vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5",
                              include_top=False)

        # Freeze VGG19 so only the generator is trained through this loss
        self.vggmodel.trainable = False
        for layer in self.vggmodel.layers:
            layer.trainable = False

        # Run the generator's output (fake_A) through VGG19 to get its features
        lossOut = self.vggmodel(self.combined.output[1])

        self.vgg_combined = Model(inputs=self.combined.input, outputs=lossOut)
        self.vgg_combined.compile(loss='mse', optimizer='adam')
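"""The smoothL1 loss passed to self.combined.compile above is not defined in this excerpt. A minimal sketch of a smooth-L1 (Huber-style) loss in Keras backend terms, with an assumed threshold, could look like this:"""

from tensorflow.keras import backend as K

HUBER_DELTA = 0.5  # assumed threshold; not given in the excerpt

def smoothL1(y_true, y_pred):
    x = K.abs(y_true - y_pred)
    # Quadratic near zero, linear in the tails
    x = K.switch(x < HUBER_DELTA,
                 0.5 * x ** 2,
                 HUBER_DELTA * (x - 0.5 * HUBER_DELTA))
    return K.sum(x)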
Code example #3
    # Assumes CIFAR-10-style data (x_train, y_train, x_test, y_test) loaded
    # earlier, e.g. via keras.datasets.cifar10.load_data()
    y_test = keras.utils.to_categorical(y_test, num_classes=10)

    train_gen = ImageDataGenerator(featurewise_std_normalization=True,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True)

    test_gen = ImageDataGenerator(featurewise_std_normalization=True)

    train_gen.fit(x_train)
    test_gen.fit(x_test)

    inp = keras.layers.Input(shape=(32, 32, 3), name='image_input')

    vgg_model = VGG19(weights='imagenet', include_top=False)
    # Freeze the convolutional base so only the new head is trained
    for layer in vgg_model.layers:
        layer.trainable = False
    vgg_out = vgg_model(inp)

    x = keras.layers.Flatten(name='flatten')(vgg_out)
    x = keras.layers.Dense(512, activation='relu', name='fc1')(x)
    x = keras.layers.Dense(512, activation='relu', name='fc2')(x)
    x = keras.layers.Dense(10, activation='softmax', name='predictions')(x)

    new_model = keras.models.Model(inputs=inp, outputs=x)
    new_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    new_model.summary()

    # The original call was truncated here; a plausible completion (the epoch
    # count and validation settings are assumptions):
    new_model.fit_generator(train_gen.flow(x_train, y_train, batch_size=64),
                            steps_per_epoch=len(x_train) // 64,
                            epochs=10,
                            validation_data=test_gen.flow(x_test, y_test, batch_size=64),
                            validation_steps=len(x_test) // 64)
"""
We set up a Sequential model that we can add layers to.
"""
# Assumes: from tensorflow.keras.models import Sequential
#          from tensorflow.keras.layers import Dense
#          from tensorflow.keras.applications import VGG19
# and num_classes set to the number of classes in your dataset
my_new_model = Sequential()
"""
first we add all of pre-trained  model
we've written include_top=False, this is how specify that we want to exlude
the layer that makes prediction into the thousands of categories used in the ImageNet competition
we set the weights to be 'ImageNet' to specify that we use the pre-traind model on ImageNet
pooling equals average says that if we had extra channels in our tensor at the end of this step
we want to collapse them to 1d tensor by taking an average across channels
now we have a pre-trained model that creates the layer before the last layer
that we saw in the slides
"""
my_new_model.add(VGG19(weights='imagenet', include_top=False, pooling='avg'))
"""
we add a dense layer to make predictions,
we specify the number of nodes in this layer which in this case is
the number of classes,
then we want to apply the softmax function to turn it into probabilities 
"""
my_new_model.add(Dense(
    num_classes,
    activation='softmax',
))
"""
we tell tensor flow not to train the first layer which is the  pre-trained model
because that's the model that was already pre-trained with the ImageNet data
"""
my_new_model.layers[0].trainable = False
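"""
To actually train this model you would compile it and fit it on your data.
A minimal sketch, assuming a directory-based image dataset (the path,
image_size, and batch size below are placeholders, not from the original):
"""
from tensorflow.keras.preprocessing.image import ImageDataGenerator

my_new_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])

image_size = 224  # VGG19's default input resolution
data_generator = ImageDataGenerator(rescale=1. / 255)

train_generator = data_generator.flow_from_directory(
    'data/train',                           # hypothetical directory
    target_size=(image_size, image_size),   # resize images to the model's input size
    batch_size=24,
    class_mode='categorical')

my_new_model.fit_generator(train_generator, steps_per_epoch=3, epochs=1)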