@classmethod
def from_config(cls, config, custom_objects=None):
    # Make sure the custom LSTMPeephole class can always be resolved.
    if custom_objects is None:
        custom_objects = {'LSTMPeephole': LSTMPeephole}
    from keras.layers import deserialize as deserialize_layer
    recurrent_layer = deserialize_layer(config.pop('recurrent_layer'),
                                        custom_objects=custom_objects)
    dense_layer = deserialize_layer(config.pop('dense_layer'),
                                    custom_objects=custom_objects)
    return cls(recurrent_layer, dense_layer, **config)
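# A minimal sketch of the get_config side of this round trip, which has to
# store each nested layer under the same key from_config pops back out.
# The attribute names self.recurrent_layer / self.dense_layer are assumptions,
# not taken from the original source:
def get_config(self):
    config = {
        'recurrent_layer': {
            'class_name': self.recurrent_layer.__class__.__name__,
            'config': self.recurrent_layer.get_config(),
        },
        'dense_layer': {
            'class_name': self.dense_layer.__class__.__name__,
            'config': self.dense_layer.get_config(),
        },
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))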
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer
    # Rebuild the wrapped layer from its nested config, then wrap it.
    dna_layer = deserialize_layer(config.pop('layer'),
                                  custom_objects=custom_objects)
    layer = cls(dna_layer, **config)
    return layer
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer
    cell = deserialize_layer(config.pop('grnn_cell'),
                             custom_objects=custom_objects)
    num_constants = config.pop('num_constants', None)
    layer = cls(cell, **config)
    layer._num_constants = num_constants
    return layer
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    # Rebuild each stacked cell from its nested config, in order.
    cells = []
    for cell_config in config.pop('cells'):
        cells.append(
            deserialize_layer(cell_config, custom_objects=custom_objects))
    return cls(cells, **config)
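# Hedged usage sketch for the stacked-cells round trip above; the cell
# sizes are arbitrary:
from keras.layers import StackedRNNCells, SimpleRNNCell

stacked = StackedRNNCells([SimpleRNNCell(4), SimpleRNNCell(4)])
config = stacked.get_config()   # per-cell configs live under the 'cells' key
clone = StackedRNNCells.from_config(config)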
@classmethod
def from_config(cls, config, custom_objects=None):
    import copy
    from keras.layers import deserialize as deserialize_layer
    # Avoid mutating the input dict.
    config = copy.deepcopy(config)
    model = deserialize_layer(config.pop("model"),
                              custom_objects=custom_objects)
    config["model"] = model
    return super().from_config(config, custom_objects)
@classmethod
def from_config(cls, config, custom_objects=None):
    import copy
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    # Avoid mutating the input dict.
    config = copy.deepcopy(config)
    layer = deserialize_layer(
        config.pop('layer'), custom_objects=custom_objects)
    return cls(layer, **config)
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer
    cell = deserialize_layer(config.pop('cell'),
                             custom_objects=custom_objects)
    num_constants = config.pop('num_constants', None)
    layer = cls(cell, **config)
    layer._num_constants = num_constants
    return layer
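# Usage sketch for the single-cell round trip above (the cell size is
# arbitrary):
from keras.layers import RNN, SimpleRNNCell

layer = RNN(SimpleRNNCell(8))
config = layer.get_config()      # embeds the cell as {'class_name', 'config'}
clone = RNN.from_config(config)  # pops 'cell' and rebuilds it, as above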
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    cell = deserialize_layer(config.pop('cell'),
                             custom_objects=custom_objects)
    num_constants = config.pop('num_constants', 0)
    layer = cls(cell, **config)
    layer._num_constants = num_constants  # pylint: disable=protected-access
    return layer
def from_config(cls, config, custom_objects=None): from keras.layers import deserialize as deserialize_layer # Avoid mutating the input dict config = copy.deepcopy(config) layer = deserialize_layer(config.pop("layer"), custom_objects=custom_objects) return cls(layer, **config)
@classmethod
def from_config(cls, config, custom_objects=None):
    import copy
    from keras.layers import deserialize as deserialize_layer  # pylint: disable=g-import-not-at-top
    # Instead of updating the input, create a copy and use that.
    config = copy.deepcopy(config)
    num_constants = config.pop('num_constants', 0)
    # Handle forward layer instantiation (as the parent class would).
    config['layer'] = deserialize_layer(
        config['layer'], custom_objects=custom_objects)
    # Handle (optional) backward layer instantiation.
    backward_layer_config = config.pop('backward_layer', None)
    if backward_layer_config is not None:
        backward_layer = deserialize_layer(
            backward_layer_config, custom_objects=custom_objects)
        config['backward_layer'] = backward_layer
    # Instantiate the wrapper, adjust it and return it.
    layer = cls(**config)
    layer._num_constants = num_constants
    return layer
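# Round-trip sketch for the bidirectional variant, assuming a Keras version
# whose Bidirectional accepts an explicit backward_layer (as the config
# handling above implies); layer sizes are arbitrary:
from keras.layers import Bidirectional, LSTM

layer = Bidirectional(LSTM(4), backward_layer=LSTM(4, go_backwards=True))
config = layer.get_config()   # nests 'layer' (and 'backward_layer') configs
clone = Bidirectional.from_config(config)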
@classmethod
def from_config(cls, config):
    from keras.layers import deserialize as deserialize_layer
    layers_config = config.pop('layers')
    layers = [deserialize_layer(layer_config)
              for layer_config in layers_config]
    return cls(layers, **config)
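# Note: unlike its siblings, the variant above never threads custom_objects
# through, so nested custom layers cannot be resolved at load time. A sketch
# of how it might be extended (an assumption, mirroring the implementations
# above, not the original source):
@classmethod
def from_config(cls, config, custom_objects=None):
    from keras.layers import deserialize as deserialize_layer
    layers_config = config.pop('layers')
    layers = [deserialize_layer(layer_config, custom_objects=custom_objects)
              for layer_config in layers_config]
    return cls(layers, **config)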
import numpy as np
from numpy.testing import assert_allclose

from keras import backend as K
from keras import layers
from keras.layers import deserialize as deserialize_layer
from keras.models import Model
from keras.utils.test_utils import layer_test


def test_lambda():
    layer_test(layers.Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    layer_test(layers.Lambda,
               kwargs={'function': lambda x, a, b: x * a + b,
                       'arguments': {'a': 0.6, 'b': 0.4}},
               input_shape=(3, 2))

    def antirectifier(x):
        x -= K.mean(x, axis=1, keepdims=True)
        x = K.l2_normalize(x, axis=1)
        pos = K.relu(x)
        neg = K.relu(-x)
        return K.concatenate([pos, neg], axis=1)

    def antirectifier_output_shape(input_shape):
        shape = list(input_shape)
        assert len(shape) == 2  # only valid for 2D tensors
        shape[-1] *= 2
        return tuple(shape)

    layer_test(layers.Lambda,
               kwargs={'function': antirectifier,
                       'output_shape': antirectifier_output_shape},
               input_shape=(3, 2))

    # test layer with multiple outputs
    def test_multiple_outputs():
        def func(x):
            return [x * 0.2, x * 0.3]

        def output_shape(input_shape):
            return [input_shape, input_shape]

        def mask(inputs, mask=None):
            return [None, None]

        i = layers.Input(shape=(64, 64, 3))
        o = layers.Lambda(function=func,
                          output_shape=output_shape,
                          mask=mask)(i)

        o1, o2 = o
        assert o1._keras_shape == (None, 64, 64, 3)
        assert o2._keras_shape == (None, 64, 64, 3)

        model = Model(i, o)

        x = np.random.random((4, 64, 64, 3))
        out1, out2 = model.predict(x)
        assert out1.shape == (4, 64, 64, 3)
        assert out2.shape == (4, 64, 64, 3)
        assert_allclose(out1, x * 0.2, atol=1e-4)
        assert_allclose(out2, x * 0.3, atol=1e-4)

    test_multiple_outputs()

    # test serialization with function
    def f(x):
        return x + 1

    ld = layers.Lambda(f)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})

    # test with lambda
    ld = layers.Lambda(
        lambda x: K.concatenate([K.square(x), x]),
        output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = layers.Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = layers.Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = deserialize_layer({'class_name': 'Lambda', 'config': config})
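# Why the tests exercise both named functions and lambdas: Keras serializes a
# named function by name (so it must be importable or supplied via
# custom_objects when loading), while a lambda is marshalled to byte code and
# is only portable across identical Python versions. A small usage sketch
# (the helper name `double` is hypothetical):
def double(x):
    return x * 2

ld = layers.Lambda(double)
config = ld.get_config()
# The function travelled by name, so supply it again when deserializing:
ld2 = deserialize_layer({'class_name': 'Lambda', 'config': config},
                        custom_objects={'double': double})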