def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
               input_data=None, expected_output=None,
               expected_output_dtype=None, fixed_batch_size=False,
               tolerance=1e-3):
    """Test routine for a layer with a single input tensor and a single
    output tensor.

    Checks, in order: layer instantiation, weight get/set round-trip,
    re-instantiation from weights, use in the functional API (output
    dtype, shape inference, model serialization, weight transfer,
    training), then the same checks with the layer as the first layer
    of a Sequential model.

    # Arguments
        layer_cls: the Layer subclass under test.
        kwargs: constructor keyword arguments for `layer_cls`.
        input_shape: shape tuple (with batch dim); `None` entries are
            replaced by random sizes when input data is generated.
        input_dtype: dtype of the generated input (defaults to
            `K.floatx()`).
        input_data: explicit input array; when given, `input_shape` and
            `input_dtype` default to its shape/dtype.
        expected_output: optional array to compare layer output against.
        expected_output_dtype: expected dtype of the output tensor
            (defaults to the input dtype).
        fixed_batch_size: build the Input with `batch_shape` instead of
            `shape` (for layers that need a static batch size).
        tolerance: relative tolerance for numeric comparisons; `None`
            disables them.

    # Returns
        The layer output on the (possibly generated) input data, for
        further checks in the caller.
    """
    # Copy so the 'weights' entry added below never leaks back to the
    # caller, and to avoid the shared mutable-default-argument pitfall
    # (the original `kwargs={}` default was mutated across calls).
    kwargs = {} if kwargs is None else dict(kwargs)

    # generate input data
    if input_data is None:
        assert input_shape
        if not input_dtype:
            input_dtype = K.floatx()
        input_data_shape = list(input_shape)
        for i, e in enumerate(input_data_shape):
            if e is None:
                # fill unknown dims with a small random size
                input_data_shape[i] = np.random.randint(1, 4)
        input_data = (10 * np.random.random(input_data_shape))
        input_data = input_data.astype(input_dtype)
    else:
        if input_shape is None:
            input_shape = input_data.shape
        if input_dtype is None:
            input_dtype = input_data.dtype
    if expected_output_dtype is None:
        expected_output_dtype = input_dtype

    # instantiation
    layer = layer_cls(**kwargs)

    # test get_weights, set_weights at layer level
    weights = layer.get_weights()
    layer.set_weights(weights)

    # test instantiation from weights
    # (inspect.getargspec was removed in Python 3.11; `signature(...)
    # .parameters` also correctly tests the parameter *names*, whereas
    # `in` on an ArgSpec namedtuple tested its four fields)
    if 'weights' in inspect.signature(layer_cls.__init__).parameters:
        kwargs['weights'] = weights
        layer = layer_cls(**kwargs)

    # test in functional API
    if fixed_batch_size:
        x = Input(batch_shape=input_shape, dtype=input_dtype)
    else:
        x = Input(shape=input_shape[1:], dtype=input_dtype)
    y = layer(x)
    assert K.dtype(y) == expected_output_dtype

    # check shape inference
    model = Model(x, y)
    expected_output_shape = layer.compute_output_shape(input_shape)
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            assert expected_dim == actual_dim
    if expected_output is not None:
        if tolerance is not None:
            assert_allclose(actual_output, expected_output, rtol=tolerance)

    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = Model.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        _output = recovered_model.predict(input_data)
        if tolerance is not None:
            assert_allclose(_output, actual_output, rtol=tolerance)

    # test training mode (e.g. useful for dropout tests)
    model.compile('rmsprop', 'mse')
    model.train_on_batch(input_data, actual_output)

    # test as first layer in Sequential API
    layer_config = layer.get_config()
    layer_config['batch_input_shape'] = input_shape
    layer = layer.__class__.from_config(layer_config)
    model = Sequential()
    model.add(layer)
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            assert expected_dim == actual_dim
    if expected_output is not None:
        if tolerance is not None:
            # honour the caller-supplied tolerance (was hard-coded 1e-3,
            # inconsistent with the functional-API check above)
            assert_allclose(actual_output, expected_output, rtol=tolerance)

    # test serialization, weight setting at model level
    model_config = model.get_config()
    recovered_model = Sequential.from_config(model_config)
    if model.weights:
        weights = model.get_weights()
        recovered_model.set_weights(weights)
        _output = recovered_model.predict(input_data)
        if tolerance is not None:
            # same consistency fix: use `tolerance`, not a literal 1e-3
            assert_allclose(_output, actual_output, rtol=tolerance)

    # test training mode (e.g. useful for dropout tests)
    model.compile('rmsprop', 'mse')
    model.train_on_batch(input_data, actual_output)

    # for further checks in the caller function
    return actual_output
##### validation_split (in ImageDataGenerator) = 0.15

# Plain CNN trained from scratch (no transfer learning): `depth` conv
# blocks, doubling the filter count after each block, then a small
# dense classifier head.
start_f = 15
depth = 5

model = tf.keras.Sequential()
for i in range(depth):
    # Only the first block declares the input shape; the rest pass a
    # placeholder, exactly as before.
    input_shape = [img_h, img_w, 3] if i == 0 else [None]
    model.add(tf.keras.layers.Conv2D(filters=start_f,
                                     kernel_size=(3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     input_shape=input_shape))
    model.add(tf.keras.layers.ReLU())
    model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
    start_f *= 2

# Classifier head.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(units=64, activation='relu'))
model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax'))

# <<< WITHOUT TL

# In[ ]:
def layer_test(test_case, layer_cls, kwargs=None, input_shape=None,
               input_dtype=None, input_data=None, expected_output=None,
               expected_output_dtype=None, fixed_batch_size=False):
    """Test routine for a layer with a single input tensor and a single
    output tensor, reporting failures through a `unittest.TestCase`.

    Checks layer instantiation, weight get/set round-trip,
    re-instantiation from weights, use in the functional API (output
    dtype, shape inference, numeric output), and use as the first
    layer of a Sequential model.

    # Arguments
        test_case: TestCase whose assert methods are used for checks.
        layer_cls: the Layer subclass under test.
        kwargs: constructor keyword arguments for `layer_cls`.
        input_shape: shape tuple (with batch dim); `None` entries are
            replaced by random sizes when input data is generated.
        input_dtype: dtype of the generated input (defaults to
            `K.floatx()`).
        input_data: explicit input array; when given, `input_shape`
            defaults to its shape.
        expected_output: optional array to compare layer output against.
        expected_output_dtype: expected dtype of the output tensor
            (defaults to the input dtype).
        fixed_batch_size: build the Input with `batch_shape` instead of
            `shape` (for layers that need a static batch size).

    # Returns
        The layer output on the (possibly generated) input data, for
        further checks in the caller.
    """
    # Copy so the 'weights' entry added below never leaks back to the
    # caller, and to avoid the shared mutable-default-argument pitfall
    # (the original `kwargs={}` default was mutated across calls).
    kwargs = {} if kwargs is None else dict(kwargs)

    if input_data is None:
        assert input_shape
        if not input_dtype:
            input_dtype = K.floatx()
        input_data_shape = list(input_shape)
        for i, e in enumerate(input_data_shape):
            if e is None:
                # fill unknown dims with a small random size
                input_data_shape[i] = np.random.randint(1, 4)
        input_data = (10 * np.random.random(input_data_shape))
        input_data = input_data.astype(input_dtype)
    elif input_shape is None:
        input_shape = input_data.shape
    if expected_output_dtype is None:
        expected_output_dtype = input_dtype

    # instantiation
    layer = layer_cls(**kwargs)

    # test get_weights, set_weights
    weights = layer.get_weights()
    layer.set_weights(weights)

    # test instantiation from weights
    # (inspect.getargspec was removed in Python 3.11; `signature(...)
    # .parameters` also correctly tests the parameter *names*, whereas
    # `in` on an ArgSpec namedtuple tested its four fields)
    if 'weights' in inspect.signature(layer_cls.__init__).parameters:
        kwargs['weights'] = weights
        layer = layer_cls(**kwargs)

    # test in functional API
    if fixed_batch_size:
        x = Input(batch_shape=input_shape, dtype=input_dtype)
    else:
        x = Input(shape=input_shape[1:], dtype=input_dtype)
    y = layer(x)
    test_case.assertEqual(K.dtype(y), expected_output_dtype)

    # NOTE: this variant targets the Keras 1.x API (`input=`/`output=`
    # keywords and `get_output_shape_for`) — kept as-is on purpose.
    model = Model(input=x, output=y)
    model.compile('rmsprop', 'mse')

    expected_output_shape = layer.get_output_shape_for(input_shape)
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            test_case.assertEqual(expected_dim, actual_dim)
    if expected_output is not None:
        assert_allclose(actual_output, expected_output, rtol=1e-3)

    # test as first layer in Sequential API
    model = Sequential()
    model.add(layer)
    model.compile('rmsprop', 'mse')
    actual_output = model.predict(input_data)
    actual_output_shape = actual_output.shape
    for expected_dim, actual_dim in zip(expected_output_shape,
                                        actual_output_shape):
        if expected_dim is not None:
            test_case.assertEqual(expected_dim, actual_dim)
    if expected_output is not None:
        assert_allclose(actual_output, expected_output, rtol=1e-3)

    # for further checks in the caller function
    return actual_output