def testQuantizesWeightsInLayer(self):
  weights = lambda shape, dtype: np.array([[-1.0, 0.0], [0.0, 1.0]])
  model = keras.Sequential([
      QuantizeEmulateWrapper(
          keras.layers.Dense(2, kernel_initializer=weights),
          input_shape=(2,),
          **self.quant_params)
  ])

  # FakeQuant([-1.0, 1.0]) = [-0.9882355, 0.9882355]
  # Obtained from tf.fake_quant_with_min_max_vars
  self.assertAllClose(
      np.array([[-0.9882355, 0.9882355]]),
      # Inputs are all ones, so result comes directly from weights.
      model.predict(np.ones((1, 2))))
def make_model(self, feature_shape):
  normal = K.initializers.glorot_normal()
  model = K.Sequential()
  model.add(
      K.layers.Dense(units=64, input_shape=feature_shape,
                     kernel_initializer=normal, activation="relu"))
  model.add(
      K.layers.Dense(units=len(self.actions),
                     kernel_initializer=normal, activation="relu"))
  self.model = model
  self._teacher_model = K.models.clone_model(self.model)
def initialize(self, state, weights=()):
  normal = K.initializers.glorot_normal()
  model = K.Sequential()
  model.add(
      K.layers.Conv2D(3, kernel_size=5, strides=3,
                      input_shape=state.shape,
                      kernel_initializer=normal, activation="relu"))
  model.add(K.layers.Flatten())
  model.add(K.layers.Dense(len(self.actions), activation="softmax"))
  self.model = model
  if len(weights) > 0:
    self.model.set_weights(weights)
def get_model(self, cloning, distribution=None, input_shapes=None):
  with distribution.scope():
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(
            3, activation='relu', input_dim=4, kernel_initializer='ones'))
    model.add(
        keras.layers.Dense(
            1, activation='sigmoid', kernel_initializer='ones'))
    model.compile(
        loss='mae',
        metrics=['accuracy', keras.metrics.BinaryAccuracy()],
        optimizer=gradient_descent.GradientDescentOptimizer(0.001),
        cloning=cloning)
    return model
def get_model(self, cloning, distribution=None, input_shapes=None):
  with distribution.scope():
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
    model.compile(
        loss=keras.losses.mean_squared_error,
        # TODO(b/130808953): Switch back to the V1 optimizer after
        # global_step is made mirrored.
        optimizer=gradient_descent_keras.SGD(0.5),
        metrics=[keras.metrics.BinaryAccuracy()],
        cloning=cloning)
    return model
def test_wide_deep_model_with_single_feature_column(self):
  vocab_list = ['alpha', 'beta', 'gamma']
  vocab_val = [0.4, 0.6, 0.9]
  data = np.random.choice(vocab_list, size=256)
  y = np.zeros_like(data, dtype=np.float32)
  for vocab, val in zip(vocab_list, vocab_val):
    indices = np.where(data == vocab)
    y[indices] = val + np.random.uniform(
        low=-0.01, high=0.01, size=indices[0].shape)
  cat_column = feature_column_v2.categorical_column_with_vocabulary_list(
      key='symbol', vocabulary_list=vocab_list)
  ind_column = feature_column_v2.indicator_column(cat_column)
  dense_feature_layer = dense_features_v2.DenseFeatures([ind_column])
  linear_model = linear.LinearModel(
      use_bias=False, kernel_initializer='zeros')
  dnn_model = keras.Sequential([keras.layers.Dense(units=1)])
  wide_deep_model = wide_deep.WideDeepModel(linear_model, dnn_model)
  combined = keras.Sequential([dense_feature_layer, wide_deep_model])
  opt = gradient_descent.SGD(learning_rate=0.1)
  combined.compile(
      opt, 'mse', [], run_eagerly=testing_utils.should_run_eagerly())
  combined.fit(x={'symbol': data}, y=y, batch_size=32, epochs=10)
def _normal():
  model = keras.Sequential([
      keras.layers.Conv2D(
          input_shape=(66, 200, 3),
          filters=24,
          kernel_size=5,
          strides=2,
          padding="valid",
          activation="relu"),
      keras.layers.Conv2D(filters=36, kernel_size=5, strides=2,
                          padding="valid", activation="relu"),
      keras.layers.Conv2D(filters=48, kernel_size=5, strides=2,
                          padding="valid", activation="relu"),
      keras.layers.BatchNormalization(fused=True),
      keras.layers.Conv2D(filters=64, kernel_size=3, strides=1,
                          padding="valid", activation="relu"),
      keras.layers.Conv2D(filters=64, kernel_size=3, strides=1,
                          padding="valid"),
      keras.layers.BatchNormalization(fused=True),
      keras.layers.Flatten(),
      keras.layers.Dense(units=1164, activation="relu"),
      keras.layers.Dense(units=100, activation="relu"),
      keras.layers.Dense(units=50, activation="relu"),
      keras.layers.Dropout(rate=0.1),
      keras.layers.Dense(units=10, activation="relu"),
      keras.layers.BatchNormalization(fused=True),
      keras.layers.Dense(units=1, activation="linear"),
  ])
  for node in model.outputs:
    print(node)
  model.summary()
  savedmodel = contrib.saved_model.save_keras_model(
      model=model, saved_model_path="./savemodel")
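# Note: `contrib.saved_model.save_keras_model` is a TF 1.x contrib API that no
# longer exists in TF 2.x; on TF 2.x the closest equivalents would be
# `model.save("./savemodel")` or `tf.saved_model.save(model, "./savemodel")`.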
def test_enables_nontensor_plumbing(self):
  # Setup.
  class Foo(object):

    def __init__(self, input_):
      self._input = input_
      self.value = ops.convert_to_tensor(42.)

  ops.register_tensor_conversion_function(
      Foo, lambda x, *args, **kwargs: x.value)
  tf_utils.register_symbolic_tensor_type(Foo)

  class PlumbingLayer(keras.layers.Lambda):

    def __init__(self, fn, **kwargs):

      def _fn(*fargs, **fkwargs):
        d = fn(*fargs, **fkwargs)
        x = ops.convert_to_tensor(d)
        d.shape = x.shape
        d.get_shape = x.get_shape
        return d, x

      super(PlumbingLayer, self).__init__(_fn, **kwargs)
      self._enter_dunder_call = False

    def __call__(self, inputs, *args, **kwargs):
      self._enter_dunder_call = True
      d, _ = super(PlumbingLayer, self).__call__(inputs, *args, **kwargs)
      self._enter_dunder_call = False
      return d

    def call(self, inputs, *args, **kwargs):
      d, v = super(PlumbingLayer, self).call(inputs, *args, **kwargs)
      if self._enter_dunder_call:
        return d, v
      return d

  # User-land.
  model = keras.Sequential([
      keras.layers.InputLayer([]),
      PlumbingLayer(Foo),  # Makes a `Foo` object.
  ])
  # Let's ensure Keras graph history is preserved by composing the models.
  model = keras.Model(model.inputs, model(model.outputs))
  # Now we instantiate the model and verify we have a `Foo` object, not a
  # `Tensor`.
  y = model(ops.convert_to_tensor(7.))
  self.assertIsInstance(y, Foo)
def test_dataset_fit_correctness(self):

  class SumLayer(keras.layers.Layer):

    def build(self, _):
      self.w = self.add_weight('w', ())

    def call(self, inputs):
      return keras.backend.sum(inputs, axis=1, keepdims=True) + self.w * 0

  model = keras.Sequential([SumLayer(input_shape=(2,))])
  model.compile(
      'rmsprop', loss='mae', run_eagerly=testing_utils.should_run_eagerly())

  inputs = np.zeros((40, 2), dtype=np.float32)
  inputs[10:20, :] = 2
  inputs[20:30, :] = 1
  inputs[30:, :] = 4
  targets = np.zeros((40, 1), dtype=np.float32)

  # Test correctness with `steps_per_epoch`.
  train_dataset = dataset_ops.Dataset.from_tensor_slices(
      (inputs, targets)).batch(10)
  val_dataset = dataset_ops.Dataset.from_tensor_slices(
      (inputs, targets)).batch(10)
  history = model.fit(train_dataset,
                      epochs=2, steps_per_epoch=2, verbose=1,
                      validation_data=val_dataset, validation_steps=2)
  self.assertAllClose(history.history['loss'],
                      [inputs[:20].sum() / 20, inputs[20:].sum() / 20])
  # The validation dataset will be reset at the end of each validation run.
  self.assertAllClose(history.history['val_loss'],
                      [inputs[:20].sum() / 20, inputs[:20].sum() / 20])

  # Test correctness with dataset reset.
  train_dataset = dataset_ops.Dataset.from_tensor_slices(
      (inputs, targets)).batch(10)
  val_dataset = dataset_ops.Dataset.from_tensor_slices(
      (inputs, targets)).batch(10)
  history = model.fit(train_dataset,
                      epochs=2, verbose=1, validation_data=val_dataset)
  self.assertAllClose(
      history.history['loss'], [inputs.sum() / 40, inputs.sum() / 40])
  self.assertAllClose(
      history.history['val_loss'], [inputs.sum() / 40, inputs.sum() / 40])
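# Why the expected losses work out this way: `SumLayer` predicts each row's
# sum (the `+ self.w * 0` term only ties the weight into the graph so the
# layer has a trainable variable), and all targets are zero, so the MAE over
# a set of rows is just the mean of the row sums. With `steps_per_epoch=2`,
# epoch 1 consumes rows 0:20 (loss = inputs[:20].sum() / 20) and epoch 2
# consumes rows 20:40; without it, every epoch sees all 40 rows, giving
# inputs.sum() / 40.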
def test_equal(self):
  norm_layer = LayerNormalization()
  model = keras.Sequential(
      [keras.layers.InputLayer(input_shape=(16, 256)), norm_layer])
  # model.build(input_shape=(3, 16, 256))
  model.compile(optimizer=keras.optimizers.Adam(), loss='mse')
  # model.summary()
  model.fit(np.zeros((3, 16, 256)), np.ones((3, 16, 256)))
  model.summary()
  inputs = np.zeros((3, 16, 256))
  predicted = model.predict(inputs)
  expected = np.ones_like(inputs)
  # `np.allclose` returns a bool; assert on it so the test can actually fail.
  self.assertTrue(np.allclose(expected, predicted))
def test_tf_module_training(self):

  class MyModule(module.Module):

    def __init__(self):
      self.v = variables.Variable(2.)

    def call(self, x, training=None):
      # training should be set by Sequential.
      assert training is not None
      return self.v * x

  model = keras.Sequential()
  model.add(MyModule())
  model.compile('sgd', 'mse')
  x, y = np.ones((10, 1)), np.ones((10, 1))
  model.fit(x, y, batch_size=2)
  self.assertLen(model.trainable_variables, 1)
def _create_dnn_model(initial_weights=None, distribution=None):
  with MaybeDistributionScope(distribution):
    # We add a few non-linear layers to make the problem non-trivial.
    model = keras.Sequential()
    model.add(keras.layers.Dense(10, activation='relu', input_shape=(1,)))
    model.add(keras.layers.Dense(10, activation='relu'))
    model.add(keras.layers.Dense(10, activation='relu'))
    model.add(keras.layers.Dense(1))

    if initial_weights:
      model.set_weights(initial_weights)

    model.compile(
        loss=keras.losses.mean_squared_error,
        optimizer=gradient_descent_keras.SGD(0.5),
        metrics=['mse'])
    return model
def test_plot_model_rnn(self):
  model = keras.Sequential()
  model.add(
      keras.layers.LSTM(16,
                        return_sequences=True,
                        input_shape=(2, 3),
                        name='lstm'))
  model.add(
      keras.layers.TimeDistributed(keras.layers.Dense(5, name='dense')))
  dot_img_file = 'model_2.png'
  try:
    vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
    self.assertTrue(file_io.file_exists(dot_img_file))
    file_io.delete_file(dot_img_file)
  except ImportError:
    pass
def testQuantize_PreferenceToUserSpecifiedQuantizeProvider(self):
  annotated_model = keras.Sequential([
      QuantizeAnnotate(
          keras.layers.Dense(3),
          input_shape=(2,),
          quantize_provider=_TestQuantizeProvider())
  ])

  with generic_utils.custom_object_scope(
      {'_TestQuantizeProvider': _TestQuantizeProvider}):
    quantized_model = quantize_apply(annotated_model)
  quantized_layer = quantized_model.layers[0]

  self._assert_layer_quantized(annotated_model.layers[0], quantized_layer)
  self.assertIsInstance(quantized_layer.quantize_provider,
                        _TestQuantizeProvider)
def test_plot_model_cnn(self):
  model = keras.Sequential()
  model.add(
      keras.layers.Conv2D(filters=2,
                          kernel_size=(2, 3),
                          input_shape=(3, 5, 5),
                          name='conv'))
  model.add(keras.layers.Flatten(name='flat'))
  model.add(keras.layers.Dense(5, name='dense'))
  dot_img_file = 'model_1.png'
  try:
    vis_utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
    self.assertTrue(file_io.file_exists(dot_img_file))
    file_io.delete_file(dot_img_file)
  except ImportError:
    pass
def __init__(self, a, b):
  super(SubclassedModelNoConfig, self).__init__()
  self.a = a
  self.b = b
  self.shared = CustomLayerNoConfig(a, b)
  self.all_layers = [
      self.shared,
      CustomLayerWithConfig(a + 1, b + 2),
      CustomLayerNoConfig(a + 3, b + 4),
      keras.Sequential([
          # TODO(b/145029112): Bug with losses when there are shared layers.
          # self.shared,  <-- Enable when bug is fixed.
          CustomLayerNoConfig(a + 5, b + 6)
      ])
  ]
def _test_model_builder(model_type: ModelType, compile_model, build_model):
  if model_type == ModelType.SEQUENTIAL:
    model = keras.Sequential([keras.layers.Dense(10)])
  elif model_type == ModelType.SUBCLASS:
    model = TestModel()

  if compile_model:
    model.compile(
        gradient_descent.SGD(),
        loss='mse',
        metrics=[keras.metrics.CategoricalAccuracy(), DictMetric()])

  if build_model:
    model.build((None, 32))

  return model
def _mnist_convnet(self):
  model = keras.Sequential()
  model.add(
      keras.layers.Conv2D(32,
                          kernel_size=(3, 3),
                          activation='relu',
                          input_shape=(28, 28, 1)))
  model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
  model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
  model.add(keras.layers.Dropout(0.25))
  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(128, activation='relu'))
  model.add(keras.layers.Dropout(0.5))
  model.add(keras.layers.Dense(10, activation='softmax'))
  return model
def test_adapt_sets_input_shape_rank(self):
  """Check that `.adapt()` sets the `input_shape`'s rank."""
  # Shape: (3, 1, 2)
  adapt_dataset = np.array([[[1., 2.]],
                            [[3., 4.]],
                            [[5., 6.]]],
                           dtype=np.float32)

  layer = get_layer()
  layer.adapt(adapt_dataset)

  input_dataset = np.array([[[1., 2.], [3., 4.]],
                            [[3., 4.], [5., 6.]]],
                           dtype=np.float32)
  layer(input_dataset)

  model = keras.Sequential([layer])
  self.assertTrue(model.built)
  # Only the rank (3) is fixed by adapt; the dimensions themselves stay
  # unknown.
  self.assertEqual(model.input_shape, (None, None, None))
def get_model(self, experimental_run_tf_function, distribution=None,
              input_shapes=None):
  with distribution.scope():
    model = keras.Sequential()
    model.add(
        keras.layers.Dense(1, input_shape=(1,), kernel_initializer='ones'))
    model.compile(
        loss=keras.losses.mean_squared_error,
        optimizer=gradient_descent_keras.SGD(0.05),
        metrics=[keras.metrics.BinaryAccuracy()],
        experimental_run_tf_function=experimental_run_tf_function)
    return model
def single_branch_model(self):
  model = K.Sequential()
  model.add(K.layers.Conv2D(filters=32, kernel_size=7,
                            input_shape=(32, 32, 1)))
  model.add(K.layers.Activation('tanh'))
  model.add(K.layers.MaxPooling2D(pool_size=2, strides=2))
  model.add(K.layers.Conv2D(filters=64, kernel_size=6))
  model.add(K.layers.Activation('tanh'))
  model.add(K.layers.Flatten())
  model.add(K.layers.Dense(128))
  model.add(K.layers.Activation('tanh'))
  model.summary()
  return model
def test_tf_module_call(self):

  class MyModule(module.Module):

    def __init__(self):
      self.v = variables.Variable(2.)

    def __call__(self, x):
      return self.v * x

  model = keras.Sequential()
  model.add(MyModule())
  model.compile('sgd', 'mse')
  x, y = np.ones((10, 1)), np.ones((10, 1))
  model.fit(x, y, batch_size=2)
  self.assertLen(model.trainable_variables, 1)
def model_test(X, Y, dropout, count, num, number):
  model = keras.Sequential()
  model.add(layers.Flatten(input_shape=((int(number) * 2 + 1) * 4,)))
  model.add(layers.Dense(64, activation="relu"))
  model.add(layers.Dense(128, activation="relu"))
  model.add(layers.Dense(256, activation="relu"))
  model.add(layers.Dense(128, activation="relu"))
  model.add(layers.Dense(64, activation="relu"))
  model.add(layers.Dropout(dropout))
  model.add(layers.Dense(2, activation="sigmoid"))
  model.compile(optimizer="Adam", loss="binary_crossentropy",
                metrics=['accuracy'])
  model.fit(X, Y, epochs=5, batch_size=256, verbose=0)
  model.save(r'H:\pyworkspace\final_funsion\base(' + number +
             r')\onehot\model\fold_' + count + r'\onehot' + count +
             '_funsion_' + num + '.h5')
def __init__(
    self,
    labels: RVconf = RVconf(10, 'onehot', projection=True, name="digits"),
    encoder_y: Optional[Union[LayerCreator, Literal['tie', 'copy']]] = None,
    decoder_y: Optional[Union[LayerCreator, Literal['tie', 'copy']]] = None,
    alpha: float = 10.,
    n_semi_iw: int = (),
    skip_decoder: bool = False,
    name: str = 'MultitaskVAE',
    **kwargs,
):
  super().__init__(name=name, **kwargs)
  self.labels = _parse_layers(labels)
  self.labels: DistributionDense
  self.alpha = tf.convert_to_tensor(alpha, dtype=self.dtype, name='alpha')
  self.n_semi_iw = n_semi_iw
  self.skip_decoder = bool(skip_decoder)
  ## prepare encoder for Y
  if encoder_y is not None:
    units_z = sum(
        np.prod(z.event_shape if hasattr(z, 'event_shape') else
                z.output_shape) for z in as_tuple(self.latents))
    if isinstance(encoder_y, string_types):  # 'tie' or 'copy' the encoder
      if encoder_y == 'tie':
        layers = []
      elif encoder_y == 'copy':
        layers = [
            keras.models.clone_model(self.encoder),
            keras.layers.Flatten()
        ]
      else:
        raise ValueError(f'No support for encoder_y={encoder_y}')
    else:  # a different network
      layers = [_parse_layers(encoder_y)]
    layers.append(
        RVconf(units_z, 'mvndiag', projection=True,
               name='qzy_x').create_posterior())
    encoder_y = keras.Sequential(layers, name='encoder_y')
  self.encoder_y = encoder_y
  ## prepare decoder for Y
  if decoder_y is not None:
    decoder_y = _parse_layers(decoder_y)
  self.decoder_y = decoder_y
def testAppliesQuantizationPreActivation(self):
  layer = self.TestLayer()
  layer.activation = QuantizeAwareActivation(
      activations.get('softmax'), self.quantizer, 0, layer)

  model = keras.Sequential([layer])

  x = np.array([[1.0, 2.0]])
  # expected_activation is determined using the float buckets when [-6, 6] is
  # quantized. Derived using `tf.fake_quant_with_min_max_vars`. For sigmoid,
  # quantization is applied twice.
  #
  # FakeQuant([1.0, 2.0]) = [0.9882355, 1.9764705]
  # Softmax([0.9882355, 1.9764705]) = [0.27126083, 0.72873914]
  expected_activation = np.array([[0.27126083, 0.72873914]])

  self.assertAllClose(expected_activation, model.predict(x))
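# A sanity check of the constants above (a sketch, assuming an 8-bit
# fake-quant over [-6, 6] with zero-point nudging): the step is
# 12 / 255 ~= 0.0470588, so 1.0 and 2.0 snap to 0.9882353 and 1.9764706.
# Softmax over a 2-vector reduces to a sigmoid of the difference:
# 1 / (1 + exp(1.9764705 - 0.9882355)) ~= 0.2712608, matching the first entry.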
def testAppliesQuantizationPostActivation(self):
  layer = self.TestLayer()
  layer.activation = QuantizeAwareActivation(
      activations.get('relu'), self.quantizer, 0, layer)

  model = keras.Sequential([layer])

  x = np.array([-6.0, -3.0, 0.0, 0.05, 0.1, 3.0, 6.0])
  # All negative values are removed due to ReLU. The other expected values
  # are the border values of float buckets when the [-6, 6] range is
  # quantized to 256 buckets.
  # Derived using `tf.fake_quant_with_min_max_vars`.
  expected_activation = np.array(
      [0.0, 0.0, 0.0, 0.04705906, 0.09411764, 3.011765, 5.9764705]
  ).reshape(7, 1)

  self.assertAllClose(expected_activation, model.predict(x))
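# The bucket arithmetic behind those constants (a sketch, under the same
# 8-bit [-6, 6] assumption): step = 12 / 255 ~= 0.0470588, so 0.1 snaps to
# 2 * step ~= 0.0941176 and 3.0 to 64 * step ~= 3.0117647. With zero-point
# nudging, the largest representable value is 127 * step ~= 5.9764706, which
# is why an input of 6.0 comes out as 5.9764705.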
def test_sequential_save_and_pop(self):
  # Test the following sequence of actions:
  # - construct a Sequential model and train it
  # - save it
  # - load it
  # - pop its last layer and add a new layer instead
  # - continue training
  np.random.seed(1337)
  (x_train, y_train), _ = testing_utils.get_test_data(
      train_samples=100,
      test_samples=0,
      input_shape=(10,),
      num_classes=2)
  y_train = np_utils.to_categorical(y_train)
  model = keras.Sequential([
      keras.layers.Dense(16, activation='relu'),
      keras.layers.Dropout(0.1),
      keras.layers.Dense(y_train.shape[-1], activation='softmax')
  ])
  model.compile(
      loss='categorical_crossentropy',
      optimizer=keras.optimizer_v2.adam.Adam(0.005),
      metrics=['acc'],
      run_eagerly=testing_utils.should_run_eagerly())
  model.fit(x_train, y_train, epochs=1, batch_size=10,
            validation_data=(x_train, y_train), verbose=2)
  model = self._save_and_reload_model(model)

  model.pop()
  model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
  model.compile(
      loss='categorical_crossentropy',
      optimizer=keras.optimizer_v2.adam.Adam(0.005),
      metrics=['acc'],
      run_eagerly=testing_utils.should_run_eagerly())
  history = model.fit(x_train, y_train, epochs=10, batch_size=10,
                      validation_data=(x_train, y_train), verbose=2)
  self.assertGreater(history.history['val_acc'][-1], 0.7)

  model = self._save_and_reload_model(model)
  _, val_acc = model.evaluate(x_train, y_train)
  self.assertAlmostEqual(history.history['val_acc'][-1], val_acc)
  predictions = model.predict(x_train)
  self.assertEqual(predictions.shape, (x_train.shape[0], 2))
def testKerasLSTMPredict(self):
  instrument = _NumpyFunctionCallback(float_only=True)
  op_callbacks.add_op_callback(instrument.callback)

  model = keras.Sequential()
  model.add(keras.layers.LSTM(1, input_shape=(2, 4)))
  model.compile(loss="mse", optimizer="sgd")

  xs = np.zeros([8, 2, 4], dtype=np.float32)
  ys = model.predict(xs)
  self.assertAllClose(ys, np.zeros([8, 1]))

  # We avoid asserting on the internal details of the LSTM implementation.
  # Instead, we just assert that some graph-internal execution states are
  # recorded by the callback.
  self.assertTrue(instrument.graph_internal_ndarrays)
def test_generator_methods(self):
  model = keras.Sequential()
  model.add(keras.layers.Dense(4, input_shape=(3,)))
  optimizer = RMSPropOptimizer(learning_rate=0.001)
  model.compile(optimizer, 'mse', metrics=['mae'])
  x = np.random.random((10, 3))
  y = np.random.random((10, 4))

  def iterator():
    while True:
      yield x, y

  model.fit_generator(iterator(), steps_per_epoch=3, epochs=1)
  model.evaluate_generator(iterator(), steps=3)
  out = model.predict_generator(iterator(), steps=3)
  self.assertEqual(out.shape, (30, 4))
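# Note: the `*_generator` methods are deprecated in newer Keras (TF >= 2.1);
# `fit`, `evaluate`, and `predict` accept Python generators directly, e.g.
# `model.fit(iterator(), steps_per_epoch=3, epochs=1)`.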
def get_instance(self, img_width, img_height):
  result_model = keras.Sequential([
      Conv2D(16, 3, padding='same', activation='relu',
             input_shape=(img_height, img_width, 3)),
      MaxPooling2D(),
      Conv2D(32, 3, padding='same', activation='relu'),
      MaxPooling2D(),
      Conv2D(64, 3, padding='same', activation='relu'),
      MaxPooling2D(),
      Flatten(),
      Dense(512, activation='relu'),
      Dense(1, activation='sigmoid')
  ])
  return result_model