Example #1
    def test_fixed_loss_scaling(self,
                                strategy_fn,
                                experimental_run_tf_function=True):
        # Note: We do not test mixed precision in this method, only loss scaling.
        if not self._is_strategy_supported(strategy_fn):
            return
        loss_scale = 8.
        batch_size = 4
        with strategy_fn().scope():
            x = layers.Input(shape=(1, ), batch_size=batch_size)
            layer = AddLayer()
            y = layer(x)

            # The gradient of 'y' at this point is 1. With loss scaling, the gradient
            # is 'loss_scale'. We divide by the batch size since the loss is averaged
            # across batch elements.
            expected_gradient = loss_scale / batch_size
            identity_with_grad_check_fn = (
                mp_test_util.create_identity_with_grad_check_fn(
                    [expected_gradient]))
            y = core.Lambda(identity_with_grad_check_fn)(y)
            model = models.Model(inputs=x, outputs=y)

            def loss_fn(y_true, y_pred):
                del y_true
                return math_ops.reduce_mean(y_pred)

            opt = gradient_descent.SGD(1.)
            opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
            model.compile(
                opt,
                loss=loss_fn,
                run_eagerly=testing_utils.should_run_eagerly(),
                experimental_run_tf_function=testing_utils.should_run_tf_function())

        self.assertEqual(backend.eval(layer.v), 1)
        x = np.ones((batch_size, 1))
        y = np.ones((batch_size, 1))
        dataset = dataset_ops.Dataset.from_tensor_slices(
            (x, y)).batch(batch_size)
        model.fit(dataset)
        # Variable starts at 1, and should have gradient of 1 subtracted from it.
        expected = 0
        self.assertEqual(backend.eval(layer.v), expected)
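
For context, a minimal sketch of what a gradient-check identity like mp_test_util.create_identity_with_grad_check_fn could look like, assuming it is built on tf.custom_gradient (the real test utility may differ):

import tensorflow as tf

def create_identity_with_grad_check_fn(expected_gradients):
    """Returns an identity function that asserts its incoming gradients."""
    @tf.custom_gradient
    def _identity_with_grad_check(x):
        def grad(dy):
            expected = tf.constant(expected_gradients, dtype=dy.dtype)
            # Check the upstream gradient before passing it through unchanged.
            with tf.control_dependencies(
                    [tf.debugging.assert_equal(dy, expected)]):
                return tf.identity(dy)
        return tf.identity(x), grad
    return _identity_with_grad_check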
Example #2
def ternaus_model_building(img_shape):
    """Ternaus model declaration"""
    inputs = layers.Input(shape=img_shape)
    model = VGG16(weights="imagenet", include_top=False, input_tensor=inputs)
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    for key in layer_dict:
        print(key)
        print(layer_dict[key])
        print(layer_dict[key].output_shape)
        layer_dict[key].trainable = False

    def decoder_block(input_tensor,
                      concat_tensor,
                      num_filters_a,
                      num_filters_b,
                      up_scale=2):
        decoder = layers.Conv2DTranspose(num_filters_a, (3, 3),
                                         strides=(up_scale, up_scale),
                                         padding='same')(input_tensor)
        decoder = layers.BatchNormalization()(decoder)
        decoder = layers.Activation("relu")(decoder)
        decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
        decoder = layers.BatchNormalization()(decoder)
        decoder = layers.Conv2D(num_filters_b, (3, 3), padding='same')(decoder)
        decoder = layers.BatchNormalization()(decoder)
        decoder = layers.Activation("relu")(decoder)
        return decoder

    actual_inputs = layers.Conv2D(512, (3, 3), padding='same')(
        layer_dict['block5_pool'].output)
    actual_inputs = layers.BatchNormalization()(actual_inputs)
    actual_inputs = decoder_block(actual_inputs,
                                  layer_dict['block5_conv3'].output, 256, 512)
    actual_inputs = decoder_block(actual_inputs,
                                  layer_dict['block4_conv3'].output, 256, 512)
    actual_inputs = decoder_block(actual_inputs,
                                  layer_dict['block3_conv3'].output, 128, 256)
    actual_inputs = decoder_block(actual_inputs,
                                  layer_dict['block2_conv2'].output, 64, 128)
    final = decoder_block(actual_inputs, layer_dict['block1_conv2'].output, 32,
                          1)

    model = models.Model(inputs=[inputs], outputs=[final])
    return model
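
A possible way to exercise this builder; the optimizer and loss below are illustrative assumptions, not part of the original:

# Hypothetical usage: binary segmentation on 224x224 RGB inputs.
model = ternaus_model_building((224, 224, 3))
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()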
Example #3
def network_1b():
    """
    将原文网络最后的FC层改为512个神经元
    acc=0, size=, time=0
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(1)(inputs)  # modified: adding a BN layer greatly speeds up convergence and improves accuracy
    bone = kl.Conv2D(filters=32,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.MaxPool2D(pool_size=(2, 1), strides=(2, 1), padding='same')(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Conv2D(filters=16,
                     kernel_size=(2, 1),
                     padding='same',
                     activation='relu',
                     strides=(1, 1))(bone)
    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=512, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #4
def trivial(num_classes, batch_size=None, use_l2_regularizer=True):
    input_shape = (224, 224, 3)
    img_input = layers.Input(shape=input_shape, batch_size=batch_size)
    x = img_input

    if backend.image_data_format() == 'channels_first':
        x = layers.Lambda(
            lambda x: backend.permute_dimensions(x, (0, 3, 1, 2)),
            name='transpose')(x)
        bn_axis = 1
    else:  # channels_last
        bn_axis = 3

    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(x)
    x = layers.Conv2D(
        64, (7, 7),
        strides=(2, 2),
        padding='valid',
        use_bias=False,
        kernel_initializer='he_normal',
        kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  name='bn_conv1')(x)

    rm_axes = ([1, 2] if backend.image_data_format() == 'channels_last'
               else [2, 3])
    x = layers.Lambda(lambda x: backend.mean(x, rm_axes),
                      name='reduce_mean')(x)
    x = layers.Dense(
        num_classes,
        kernel_initializer=initializers.RandomNormal(stddev=0.01),
        kernel_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        bias_regularizer=_gen_l2_regularizer(use_l2_regularizer),
        name='fc1000')(x)

    # A softmax that is followed by the model loss cannot be done in float16
    # due to numeric issues, so we pass dtype=float32.
    x = layers.Activation('softmax', dtype='float32')(x)

    # Create model.
    return models.Model(img_input, x, name='resnet50')
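
The _gen_l2_regularizer helper is assumed by the model above but not shown; the official resnet code uses a pattern like the following sketch (the decay constant here is a placeholder):

from tensorflow.keras import regularizers

def _gen_l2_regularizer(use_l2_regularizer=True, l2_weight_decay=1e-4):
    # Return an L2 regularizer, or None to disable regularization.
    return regularizers.l2(l2_weight_decay) if use_l2_regularizer else None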
Example #5
def get_model(style_layers, content_layers):
    # Load the VGG19 model, pretrained on ImageNet, without the fully connected layers
    vgg19 = tf.keras.applications.vgg19.VGG19(include_top=False,
                                              weights=WEIGHTS)

    # Set trainable to false as we don't need to train the network
    vgg19.trainable = False

    # Get output layers corresponding to style and content layers
    style_outputs = [vgg19.get_layer(layer).output for layer in style_layers]
    content_outputs = [
        vgg19.get_layer(layer).output for layer in content_layers
    ]

    # Combine the output layers of interest
    model_outputs = style_outputs + content_outputs

    # Build and return the model
    return models.Model(vgg19.input, model_outputs)
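
Typical driving code for such an extractor, sketched with the standard VGG19 layer choices and preprocessing (the style_layers/content_layers values and WEIGHTS='imagenet' are assumptions):

import numpy as np
import tensorflow as tf

style_layers = ['block1_conv1', 'block2_conv1', 'block3_conv1',
                'block4_conv1', 'block5_conv1']
content_layers = ['block5_conv2']

extractor = get_model(style_layers, content_layers)
img = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
img = tf.keras.applications.vgg19.preprocess_input(img)
features = extractor(img)
style_features = features[:len(style_layers)]
content_features = features[len(style_layers):]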
Example #6
def build_model():

    input1 = layers.Input(shape=(X_unstructure.shape[1], ))
    input2 = layers.Input(shape=(X_structure.shape[1], ))

    input1_bn = layers.BatchNormalization()(input1)
    x1_1 = layers.Dense(600, activation='tanh')(input1_bn)
    input2_bn = layers.BatchNormalization()(input2)
    x2_1 = layers.Dense(200, activation='tanh')(input2_bn)

    x_connect = tf.concat([x1_1, x2_1], 1)
    x_connect_1 = layers.Dense(600, activation='tanh')(x_connect)
    x_connect_2 = layers.Dense(400, activation='tanh')(x_connect_1)
    x_connect_2_d = layers.Dropout(0.3)(x_connect_2)
    y_connect_ = layers.Dense(2, activation='softmax')(x_connect_2_d)

    x1_2 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x1_1)
    x2_2 = layers.Dense(400,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x2_1)
    x1_2_d = layers.Dropout(0.3)(x1_2)
    x2_2_d = layers.Dropout(0.3)(x2_2)

    x1_3 = layers.Dense(600,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x1_2_d)
    x2_3 = layers.Dense(400,
                        activation='tanh',
                        kernel_regularizer=regularizers.l2())(x2_2_d)
    x1_3_d = layers.Dropout(0.3)(x1_3)
    x2_3_d = layers.Dropout(0.3)(x2_3)

    y1_ = layers.Dense(2, activation='softmax')(x1_3_d)
    y2_ = layers.Dense(2, activation='softmax')(x2_3_d)

    model = models.Model(inputs=[input1, input2],
                         outputs=[y_connect_, y1_, y2_])
    sgd = optimizers.SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True)
    #sgd = optimizers.SGD()
    model.compile(optimizer=sgd, loss=total_loss, metrics=['accuracy'])
    return model
Example #7
    def plot_intermediate_outputs(self, num_layers: int = 12, images_per_row: int = 12, save: bool = False):

        print('\nVisualizing the intermediate outputs of the first {} layers...'.format(num_layers))

        if (save and check_existing_folder('outputs_visualization')) or not save:
            # Collect the name of the layers for the plot
            layer_names = [layer.name for layer in self.__model.layers[:num_layers]]

            # Extract the outputs of the layers
            layer_outputs = [layer.output for layer in self.__model.layers[:num_layers]]

            # Create a model that will return the given outputs on the base of the model input
            activation_model = models.Model(inputs=self.__model.input, outputs=layer_outputs)

            # Perform a prediction on the test image using the new model
            activations = activation_model.predict(self.__img)

            # Display the activations in a grid
            self.__display_grid(layer_names, activations, images_per_row, save)
Example #8
def get_model():
    """ Creates our model with access to intermediate layers.
  This function will load the VGG19 model and access the intermediate layers.
  These layers will then be used to create a new model that will take input image
  and return the outputs from these intermediate layers from the VGG model.
 
  Returns:
    returns a keras model that takes image inputs and outputs the style and
      content intermediate layers.
  """
    # Load our model. We load pretrained VGG, trained on imagenet data
    vgg = keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False
    # Get output layers corresponding to style and content layers
    style_outputs = [vgg.get_layer(name).output for name in style_layers]
    content_outputs = [vgg.get_layer(name).output for name in content_layers]
    model_outputs = style_outputs + content_outputs
    # Build model
    return models.Model(vgg.input, model_outputs)
Example #9
def build_model():
    sequences = layers.Input(shape=(MAX_LENGTH, ))
    embedded = layers.Embedding(MAX_FEATURES, 64)(sequences)
    x = layers.Conv1D(64, 3, activation='relu')(embedded)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(3)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    x = layers.BatchNormalization()(x)
    x = layers.MaxPool1D(5)(x)
    x = layers.Conv1D(64, 5, activation='relu')(x)
    x = layers.GlobalMaxPool1D()(x)
    x = layers.Flatten()(x)
    x = layers.Dense(100, activation='relu')(x)
    predictions = layers.Dense(1, activation='sigmoid')(x)
    model = models.Model(inputs=sequences, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])
    return model
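
A minimal smoke test for this classifier; MAX_FEATURES and MAX_LENGTH are module globals the function assumes, so placeholder values are used here:

import numpy as np

MAX_FEATURES, MAX_LENGTH = 20000, 400  # placeholder values

model = build_model()
x = np.random.randint(1, MAX_FEATURES, size=(8, MAX_LENGTH))
y = np.random.randint(0, 2, size=(8, 1))
model.train_on_batch(x, y)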
Example #10
    def build_generator(self):
        dim = self.image_size[0]
        mult = dim // 8

        x = inputs = layers.Input((1, 1, self.z_dim))
        x = ops.UpConv2D(dim // 2 * mult, 4, 1, 'valid')(x)
        x = ops.BatchNorm()(x)
        x = layers.ReLU()(x)

        while mult > 1:
            x = ops.UpConv2D(dim // 2 * (mult // 2))(x)
            x = ops.BatchNorm()(x)
            x = layers.ReLU()(x)

            mult //= 2

        x = ops.UpConv2D(3)(x)
        x = layers.Activation('tanh')(x)
        return models.Model(inputs, x, name='Generator')
Example #11
    def build_discriminator(self):
        dim = self.image_size[0]
        mult = 1
        i = dim // 2

        x = inputs = layers.Input((dim, dim, 3))
        x = ops.Conv2D(dim // 2)(x)
        x = ops.LeakyRelu()(x)

        while i > 4:
            x = ops.Conv2D(dim // 2 * (2 * mult))(x)
            x = ops.LayerNorm(axis=[1, 2, 3])(x)
            x = ops.LeakyRelu()(x)

            i //= 2
            mult *= 2

        x = ops.Conv2D(1, 4, 1, 'valid')(x)
        return models.Model(inputs, x, name='Discriminator')
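
The ops module used by this generator/discriminator pair is not shown. A plausible sketch, assuming the wrappers are thin defaults over the stock Keras layers (kernel size 4, stride 2, 'same' padding, as is common in DCGAN-style code):

# ops.py (hypothetical): thin wrappers matching the call sites above.
from tensorflow.keras import layers

def Conv2D(filters, kernel_size=4, strides=2, padding='same'):
    return layers.Conv2D(filters, kernel_size, strides, padding)

def UpConv2D(filters, kernel_size=4, strides=2, padding='same'):
    return layers.Conv2DTranspose(filters, kernel_size, strides, padding)

def LeakyRelu(alpha=0.2):
    return layers.LeakyReLU(alpha)

BatchNorm = layers.BatchNormalization
LayerNorm = layers.LayerNormalization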
Example #12
def network_1c():
    """
    BN+FC(512)+FC(6)
    (实验证明FC512效果优于FC1024)
    acc=0.96~0.99, size=841kb, time=0.03s
    :return: model
    """
    inputs = kl.Input(shape=(8, 16, 1))
    bone = kl.BatchNormalization(1)(inputs)

    bone = kl.Flatten()(bone)
    bone = kl.Dense(units=512, activation='relu')(bone)
    outputs = kl.Dense(units=6, activation='softmax')(bone)
    model = km.Model(inputs=inputs, outputs=outputs)
    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example #13
def get_multiclass_model(depth, nclasses, optim, loss, mets, bias=None):
    """
    Build a U-Net model
    Parameters:
        depth (int): number of training features (i.e. bands)
        nclasses (int): number of output classes
        optim (tf.keras.optimizer): keras optimizer
        loss (tf.keras.loss): keras or custom loss function
        mets (dict of tf.keras.metrics): dictionary of metrics for logits and classes; elements are lists of keras metrics
    Returns:
        tf.keras.Model: compiled U-Net model
    """
    if bias is not None:
        bias = tf.keras.initializers.Constant(bias)

    inputs = layers.Input(shape=[None, None, depth])  # 256
    encoder0_pool, encoder0 = encoder_block(inputs, 32)  # 128
    encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64)  # 64
    encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128)  # 32
    encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256)  # 16
    encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512)  # 8
    center = conv_block(encoder4_pool, 1024)  # center
    decoder4 = decoder_block(center, encoder4, 512)  # 16
    decoder3 = decoder_block(decoder4, encoder3, 256)  # 32
    decoder2 = decoder_block(decoder3, encoder2, 128)  # 64
    decoder1 = decoder_block(decoder2, encoder1, 64)  # 128
    decoder0 = decoder_block(decoder1, encoder0, 32)  # 256
    outputs = layers.Conv2D(nclasses, (1, 1),
                            activation='softmax',
                            name='softmax')(decoder0)
    # logits = layers.Conv2D(1, (1, 1), activation='sigmoid', bias_initializer = bias, name = 'logits')(decoder0)
    # logits is a probability and classes is binary. in solar, "tf.cast(tf.greater(x, 0.9)" was used  to avoid too many false positives
    # classes = layers.Lambda(lambda x: tf.cast(tf.greater(x, 0.5), dtype = tf.int32), name = 'classes')(logits)
    # model = models.Model(inputs=[inputs], outputs=[logits, classes])
    model = models.Model(inputs=[inputs], outputs=[outputs])

    model.compile(
        optimizer=optim,
        loss={'softmax': loss},
        #loss=losses.get(LOSS),
        metrics=mets)

    return model
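
Several of the U-Net builders in this collection call encoder_block, conv_block, and decoder_block without defining them. The sketch below is an assumption consistent with the halving/doubling shape comments in the code; the variant in Example #17 additionally takes pool and reg flags:

from tensorflow.keras import layers

def conv_block(input_tensor, num_filters):
    # Two 3x3 conv + BN + ReLU stages at a fixed resolution.
    x = layers.Conv2D(num_filters, (3, 3), padding='same')(input_tensor)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(num_filters, (3, 3), padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation('relu')(x)
    return x

def encoder_block(input_tensor, num_filters):
    # Conv block followed by 2x2 max pooling; the pre-pool activations are
    # also returned so they can serve as the skip connection.
    encoder = conv_block(input_tensor, num_filters)
    encoder_pool = layers.MaxPooling2D((2, 2), strides=(2, 2))(encoder)
    return encoder_pool, encoder

def decoder_block(input_tensor, concat_tensor, num_filters):
    # Upsample by 2, concatenate the skip connection, then conv block.
    decoder = layers.Conv2DTranspose(num_filters, (2, 2), strides=(2, 2),
                                     padding='same')(input_tensor)
    decoder = layers.concatenate([concat_tensor, decoder], axis=-1)
    decoder = conv_block(decoder, num_filters)
    return decoder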
Example #14
    def _get_keras_model(self) -> models.Model:
        I = layers.Input(shape=(None, self._embedding_size),
                         dtype='float32',
                         name=base_model.TOKENS_FEATURE_KEY)

        # Bidirectional GRU
        H = I
        for num_units in self.hparams().gru_units:
            H = layers.Bidirectional(
                layers.GRU(num_units, return_sequences=True))(H)

        # Attention
        last_gru_units = self.hparams().gru_units[-1] * 2  # x2 because bidirectional
        A = layers.TimeDistributed(layers.Dense(self.hparams().attention_units,
                                                activation='relu'),
                                   input_shape=(None, last_gru_units))(H)
        A = layers.TimeDistributed(layers.Dense(1))(A)
        A = layers.Flatten()(A)
        A = layers.Activation('softmax')(A)

        # Dense
        X = layers.Dot((1, 1))([H, A])
        X = layers.Flatten()(X)
        for num_units in self.hparams().dense_units:
            X = layers.Dense(num_units, activation='relu')(X)
            X = layers.Dropout(self.hparams().dropout_rate)(X)

        # Outputs
        outputs = []
        for label in self._labels:
            outputs.append(
                layers.Dense(1, activation='sigmoid', name=label)(X))

        model = models.Model(inputs=I, outputs=outputs)
        model.compile(
            optimizer=optimizers.Adam(lr=self.hparams().learning_rate),
            loss='binary_crossentropy',
            metrics=['binary_accuracy', super().roc_auc])

        tf.logging.info(model.summary())
        return model
Example #15
    def build_model(self):
        """Build an actor (policy) network that maps states -> actions."""
        # Define input layer (states)
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Add hidden layers
        net = layers.Dense(units=32, activation='relu')(states)
        net = layers.Dropout(0.8)(net)
        net = layers.Dense(units=64, activation='relu')(net)
        net = layers.Dropout(0.8)(net)
        net = layers.Dense(units=32, activation='relu')(net)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Add final output layer with sigmoid activation
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Scale [0, 1] output for each action dimension to proper range
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Create Keras model
        self.model = models.Model(inputs=states, outputs=actions)

        # Define loss function using action value (Q value) gradients
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Define optimizer and training function
        optimizer = optimizers.Adam(lr=0.0001)
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
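
How the resulting train_fn is typically invoked from the agent's learn step (a sketch; states, action_gradients, and the actor_local attribute belong to the surrounding, assumed agent code):

# states: (batch, state_size); action_gradients: (batch, action_size).
# The trailing 1 selects the training phase for K.learning_phase().
self.actor_local.train_fn([states, action_gradients, 1])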
Example #16
    def test_save_weights_with_dynamic_loss_scaling(self, strategy_fn):
        if not self._is_strategy_supported(strategy_fn):
            return
        strategy = strategy_fn()
        if (isinstance(strategy, mirrored_strategy.MirroredStrategy)
                and not context.executing_eagerly()):
            # TODO(b/121381184): Enable running the test in this case.
            return

        # Create and run model.
        with strategy.scope():
            x = layers.Input(shape=(2, ), batch_size=2, dtype=dtypes.float32)
            y = AddLayer(assert_type=dtypes.float32)(x)
            model = models.Model(inputs=x, outputs=y)

            loss_scale = loss_scale_module.DynamicLossScale(
                initial_loss_scale=1., increment_period=2., multiplier=2.)
            opt = gradient_descent.SGD(1.)
            opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
            model.compile(
                optimizer=opt,
                loss='mse',
                run_eagerly=testing_utils.should_run_eagerly(),
                run_distributed=testing_utils.should_run_distributed())
        # Run for 3 steps (6 examples with a batch size of 2)
        model.fit(np.zeros((6, 2)), np.zeros((6, 2)), batch_size=2)
        self.assertEqual(backend.get_value(loss_scale()), 2)
        self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)

        # Save model weights.
        save_prefix = os.path.join(self.get_temp_dir(), 'ckpt')
        model.save_weights(save_prefix)

        # Run model again for 1 step (2 examples with a batch size of 2)
        model.fit(np.zeros((2, 2)), np.zeros((2, 2)), batch_size=2)
        self.assertEqual(backend.get_value(loss_scale()), 4)
        self.assertEqual(backend.get_value(loss_scale._num_good_steps), 0)

        # Load model weights and ensure loss scale weights are restored.
        model.load_weights(save_prefix)
        self.assertEqual(backend.get_value(loss_scale()), 2)
        self.assertEqual(backend.get_value(loss_scale._num_good_steps), 1)
Example #17
def encoder_decoder_model(img_shape, reg_enc=False, reg_dec=False):
    """Simple encoding decoding model (UNet). Encodes by factor 2**4 and
  decodes back.

  Arguments:
    img_shape: image shape as (height, width, channels)

    reg_enc: regularize encoding

    reg_dec: regularize decoding

  Returns:
    a keras model
  """
    inputs = layers.Input(shape=img_shape)
    # (256,256,3)
    encoder0 = encoder_block(inputs, 32, pool=False, reg=reg_enc)
    # (256,256,32)
    encoder1 = encoder_block(encoder0, 64, reg=reg_enc)
    # (128,128,64)
    encoder2 = encoder_block(encoder1, 128, reg=reg_enc)
    # (64,64,128)
    encoder3 = encoder_block(encoder2, 256, reg=reg_enc)
    # (32,32,256)
    encoder4 = encoder_block(encoder3, 512, reg=reg_enc)
    # (16,16,512)
    center = encoder_block(encoder4, 1024, reg=reg_enc)
    # (8,8,1024)
    decoder4 = decoder_block(center, encoder4, 512, reg=reg_dec)
    # (16,16,512)
    decoder3 = decoder_block(decoder4, encoder3, 256, reg=reg_dec)
    # (32,32,256)
    decoder2 = decoder_block(decoder3, encoder2, 128, reg=reg_dec)
    # (64,64,128)
    decoder1 = decoder_block(decoder2, encoder1, 64, reg=reg_dec)
    # (128,128,64)
    decoder0 = decoder_block(decoder1, encoder0, 32, reg=reg_dec)
    # (256,256,32)
    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)
    # (256,256,1)
    model = models.Model(inputs=[inputs], outputs=[outputs])
    return model
Example #18
    def build_model(self):
        if self.name == 'VGG19':
            # Load our model. We load pretrained VGG, trained on imagenet data
            vgg = tf.keras.applications.vgg19.VGG19(include_top=False,
                                                    weights='imagenet')
            vgg.trainable = False
            # Get output layers corresponding to style and content layers
            style_outputs = [
                vgg.get_layer(name).output for name in self.style_feat_layers
            ]
            content_outputs = [
                vgg.get_layer(name).output for name in self.content_feat_layers
            ]
            model_outputs = style_outputs + content_outputs
            # Build model
        else:
            raise ValueError(
                "Error: model '{}' is not supported! Please check!".format(self.name))

        self.model = models.Model(vgg.input, model_outputs)
        print(self.model.summary())
Example #19
def get_model(img_shape):
    inputs = layers.Input(shape=img_shape)

    encoder0_pool, encoder0 = encoder_block(inputs, 8)
    encoder1_pool, encoder1 = encoder_block(encoder0_pool, 16)
    encoder2_pool, encoder2 = encoder_block(encoder1_pool, 32)
    encoder3_pool, encoder3 = encoder_block(encoder2_pool, 64)

    center = conv_block(encoder3_pool, 128)

    decoder3 = decoder_block(center, encoder3, 64)
    decoder2 = decoder_block(decoder3, encoder2, 32)
    decoder1 = decoder_block(decoder2, encoder1, 16)
    decoder0 = decoder_block(decoder1, encoder0, 8)

    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)

    model = models.Model(inputs=[inputs], outputs=[outputs])

    return model
Example #20
    def build_model(self) -> keras_models.Model:
        inputs = keras_layers.Input(shape=(None, None, 3))
        kernel_size = (3, 3)

        x = inputs
        for type_, name, filters in self.layers:
            if type_ == 'conv':
                x = self._get_conv_layer(filters, kernel_size, name)(x)
            else:
                pass
                # TODO max pooling

        # create model
        model = keras_models.Model(inputs, x, name='vgg19')

        # load weight
        weight_path = self._get_weight()
        model.load_weights(weight_path)

        return model
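
The self.layers specification consumed by the loop above is not shown; presumably it is a list of (type, name, filters) tuples mirroring VGG19, along the lines of:

# Hypothetical layer specification for the builder above.
layers_spec = [
    ('conv', 'block1_conv1', 64),
    ('conv', 'block1_conv2', 64),
    ('pool', 'block1_pool', None),  # pooling branch is still a TODO above
    ('conv', 'block2_conv1', 128),
    # ... continuing through block5, as in VGG19
]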
Example #21
def get_model():
  """Creates a model with access to intermediate layers. 
  
  These layers will then be used to create a new model that will take the
  content image and return the outputs from these intermediate layers from the
  VGG model. 
  
  Returns:
    A keras model that takes image inputs and outputs the style and content
    intermediate layers.
  """

  vgg = vgg19.VGG19(include_top=False, weights='imagenet')
  vgg.trainable = False
 
  style_outputs = [vgg.get_layer(name).output for name in style_layers]
  content_outputs = [vgg.get_layer(name).output for name in content_layers]
  model_outputs = style_outputs + content_outputs

  return models.Model(vgg.input, model_outputs)
Example #22
    def test_save_weights_with_autocast_vars(self, strategy_fn, h5=False):
        with strategy_fn().scope():
            with policy.policy_scope('mixed_float16'):
                x = layers.Input(shape=(1, ), batch_size=2)
                layer = mp_test_util.MultiplyLayer(assert_type=dtypes.float16)
                y = layer(x)
                model = models.Model(inputs=x, outputs=y)

        model.set_weights([np.array(100.)])
        x = np.ones((2, 1))
        self.assertAllClose(backend.get_value(model(x)), x * 100.)
        suffix = '.h5' if h5 else ''
        weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
        model.save_weights(weights_file)

        model.set_weights([np.array(200.)])
        self.assertAllClose(backend.get_value(model(x)), x * 200.)
        model.load_weights(weights_file)
        self.assertAllClose(backend.get_value(model(x)), x * 100.)
        self.assertEqual(model.get_weights(), [np.array(100.)])
Example #23
def make_decoder_model(input_tensor=None, input_shape=(28, 28, 1)):
    """ Create a decoder model
  Args:
    input_tensor: It's going to be tensor, which is going to be the array
    input_shape:  image tensor size.
  Returns:
    tf.keras.Model
  """
    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = layers.Dense(7 * 7 * 64, activation=tf.nn.relu, name="den1")(img_input)
    x = layers.Reshape(target_shape=(7, 7, 64), name="reshape1")(x)
    x = layers.Conv2DTranspose(64, (3, 3),
                               strides=(2, 2),
                               padding="SAME",
                               activation=tf.nn.relu,
                               name="convt1")(x)
    x = layers.Conv2DTranspose(32, (3, 3),
                               strides=(2, 2),
                               padding="SAME",
                               activation=tf.nn.relu,
                               name="convt2")(x)
    x = layers.Conv2DTranspose(1, (3, 3),
                               strides=(1, 1),
                               padding="SAME",
                               activation=tf.nn.sigmoid,
                               name="convt3")(x)

    if input_tensor is not None:
        inputs = utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = models.Model(inputs, x, name='Decoder_model')
    return model
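
Note that the Dense(7 * 7 * 64) + Reshape stack only lines up when the input is a flat latent vector rather than the (28, 28, 1) default; a usage sketch under that assumption (the 100-dim latent size is arbitrary):

decoder = make_decoder_model(input_shape=(100,))
decoder.summary()  # (100,) -> 7x7x64 -> 14x14x64 -> 28x28x32 -> 28x28x1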
Example #24
def make_model(env):
    original_matrix = np.genfromtxt(PATH_TO_CSV, delimiter=",", skip_header=39)
    stateCnt = original_matrix.shape[1]
    # Each observation is a window of 10 timesteps over the stateCnt feature
    # columns read from the CSV.
    input_shape = (10, stateCnt)
    ph_state = layers.Input(shape=input_shape)
    # conv1 = layers.Conv2D(32, (8, 8), strides=(4, 4))(ph_state)
    conv1 = layers.Conv1D(filters=2, kernel_size=1)(ph_state)
    conv1 = layers.Activation('relu')(conv1)
    # conv2 = layers.Conv2D(64, (4, 4), strides=(2, 2))(conv1)
    # conv2 = layers.Activation('relu')(conv2)
    # conv3 = layers.Conv2D(64, (3, 3), strides=(1, 1))(conv2)
    # conv3 = layers.Activation('relu')(conv3)
    conv_flat = layers.Flatten()(conv1)
    feature = layers.Dense(512)(conv_flat)
    feature = layers.Activation('relu')(feature)
    # actor (policy) and critic (value) streams
    size_logits = 4
    size_value = 4
    logits_init = initializers.RandomNormal(stddev=1e-3)
    logits = layers.Dense(size_logits, kernel_initializer=logits_init)(feature)
    value = layers.Dense(size_value)(feature)
    return models.Model(inputs=ph_state, outputs=[logits, value])
Example #25
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # Add hidden layer(s) for state pathway
        net_states = layers.Dense(units=32, activation='relu')(states)
        net_states = layers.Dropout(0.8)(net_states)
        net_states = layers.Dense(units=64, activation='relu')(net_states)
        net_states = layers.Dropout(0.8)(net_states)

        # Add hidden layer(s) for action pathway
        net_actions = layers.Dense(units=32, activation='relu')(actions)
        net_actions = layers.Dense(units=64, activation='relu')(net_actions)

        # Try different layer sizes, activations, add batch normalization, regularizers, etc.

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add more layers to the combined network if needed

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam(lr=0.0001)
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
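
And the matching critic-side call that feeds the actor's train_fn shown earlier (again a sketch of the assumed agent code; the trailing 0 selects the inference phase):

import numpy as np

action_gradients = np.reshape(
    self.critic_local.get_action_gradients([states, actions, 0]),
    (-1, self.action_size))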
Example #26
def get_model256():
    inputs = layers.Input(shape=(256, 256, 3))
    # 256

    encoder0_pool, encoder0 = encoder_block(inputs, 32)
    # 128

    encoder1_pool, encoder1 = encoder_block(encoder0_pool, 64)
    # 64

    encoder2_pool, encoder2 = encoder_block(encoder1_pool, 128)
    # 32

    encoder3_pool, encoder3 = encoder_block(encoder2_pool, 256)
    # 16

    encoder4_pool, encoder4 = encoder_block(encoder3_pool, 512)
    # 8

    center = conv_block(encoder4_pool, 1024)
    # center

    decoder4 = decoder_block(center, encoder4, 512)
    # 16

    decoder3 = decoder_block(decoder4, encoder3, 256)
    # 32

    decoder2 = decoder_block(decoder3, encoder2, 128)
    # 64

    decoder1 = decoder_block(decoder2, encoder1, 64)
    # 128

    decoder0 = decoder_block(decoder1, encoder0, 32)
    # 256

    outputs = layers.Conv2D(1, (1, 1), activation='sigmoid')(decoder0)

    model = models.Model(inputs=[inputs], outputs=[outputs])
    return model
Example #27
  def test_model(self, strategy_fn, use_operator=False, use_regularizer=False,
                 cloning=True):
    if not self._is_strategy_supported(strategy_fn):
      return
    regularizer = IdentityRegularizer() if use_regularizer else None
    with strategy_fn().scope():
      with policy.policy_scope('infer_float32_vars'):
        x = layers.Input(shape=(1,), batch_size=2, dtype=dtypes.float16)
        layer = AddLayer(assert_type=dtypes.float16, use_operator=use_operator,
                         regularizer=regularizer)
        y = layer(x)
        y = math_ops.cast(y, dtypes.float32)
        model = models.Model(inputs=x, outputs=y)

        def loss_fn(y_true, y_pred):
          del y_true
          return math_ops.reduce_mean(y_pred)

        # Learning rate is small enough that if applied to a float16 variable,
        # the variable will not change. So this tests that the learning rate is
        # applied to the float32 variable and not to a float16 value.
        opt = gradient_descent.SGD(2 ** -14)
        model.compile(
            opt,
            loss=loss_fn,
            cloning=cloning,
            run_eagerly=testing_utils.should_run_eagerly(),
            run_distributed=testing_utils.should_run_distributed())

    self.assertEqual(backend.eval(layer.v), 1)
    x = np.ones((2, 1))
    y = np.ones((2, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(2)
    model.fit(dataset)
    # Variable starts at 1, and should have gradient of 2 ** -14 subtracted
    # from it.
    expected = 1 - 2 ** -14
    if use_regularizer:
      # Regularizer adds another 2 ** -14 to the gradient.
      expected -= 2 ** -14
    self.assertEqual(backend.eval(layer.v), expected)
Example #28
def quick_eval(params, save_model_path):
    img_dir = os.path.join(params["data_dir"], "test_images")
    label_dir = os.path.join(params["data_dir"], "test_labels")

    ids_train = [i.replace(".png","") for i in os.listdir(img_dir)]

    x_train_filenames = []
    y_train_filenames = []
    for img_id in ids_train:
        x_train_filenames.append(os.path.join(img_dir, "{}.png".format(img_id)))
        y_train_filenames.append(os.path.join(label_dir, "{}.png".format(img_id)))

    y_val_filenames = y_train_filenames
    x_val_filenames = x_train_filenames

    num_train_examples = len(x_train_filenames)
    num_val_examples = len(x_val_filenames)

    print("Number of training examples: {}".format(num_train_examples))
    print("Number of validation examples: {}".format(num_val_examples))

    # Rebuild the model architecture and load the trained weights into it.
    inputs, outputs = model_fn(params["img_shape"])
    model = models.Model(inputs=[inputs], outputs=[outputs])
    model.load_weights(save_model_path)
    train_ds,val_ds,temp_ds = batch_data_sets(x_train_filenames,y_train_filenames,x_val_filenames,y_val_filenames)

    # Let's visualize some of the outputs
    data_aug_iter = val_ds.make_one_shot_iterator()
    next_element = data_aug_iter.get_next()

    # Running next element in our graph will produce a batch of images
    test_losses = []
    for i in range(12):
        batch_of_imgs, label = tf.keras.backend.get_session().run(next_element)
        img = batch_of_imgs[0]
        loss = model.evaluate(x=batch_of_imgs, y=label, batch_size=1, verbose=1, sample_weight=None, steps=None)
        test_losses.append(loss)

    print("average dice score over all samples are:{}".format(np.mean(test_losses)))
Example #29
    def test_save_weights(self, strategy_fn, h5=False):
        with strategy_fn().scope():
            with policy.policy_scope('infer_float32_vars'):
                x = layers.Input(shape=(), batch_size=2, dtype=dtypes.float16)
                layer = AddLayer(assert_type=dtypes.float16)
                y = layer(x)
                y = math_ops.cast(y, dtypes.float32)
                model = models.Model(inputs=x, outputs=y)

        model.set_weights([np.array(100.)])
        x = np.ones((2, 1), dtype=np.float16)
        self.assertAllClose(backend.get_value(model(x)), x + 100.)
        suffix = '.h5' if h5 else ''
        weights_file = os.path.join(self.get_temp_dir(), 'weights' + suffix)
        model.save_weights(weights_file)

        model.set_weights([np.array(200.)])
        self.assertAllClose(backend.get_value(model(x)), x + 200.)
        model.load_weights(weights_file)
        self.assertAllClose(backend.get_value(model(x)), x + 100.)
        self.assertEqual(model.get_weights(), [np.array(100.)])
Example #30
def get_model():
    # Content layer: block5_conv2
    content_layers = ['block5_conv2']

    # Style layers: the first conv layer of each block
    style_layers = ['block1_conv1',
                    'block2_conv1',
                    'block3_conv1',
                    'block4_conv1',
                    'block5_conv1']

    # Load the VGG19 model with imagenet weights
    vgg = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')
    vgg.trainable = False
    # Extract style features
    style_outputs = [vgg.get_layer(name).output for name in style_layers]
    # Extract content features
    content_outputs = [vgg.get_layer(name).output for name in content_layers]
    # Combine the output features
    model_outputs = style_outputs + content_outputs
    return models.Model(vgg.input, model_outputs)