Example #1
0
File: lstm_test.py  Project: lengjia/RRL
    def test_specify_initial_state_keras_tensor(self):
        """LSTM accepts Keras symbolic tensors via `initial_state`."""
        num_states = 2
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2

        with self.test_session():
            # Test with Keras tensor: one Input for the sequence plus one
            # Input per LSTM state.
            seq_input = keras.Input((timesteps, embedding_dim))
            state_inputs = [
                keras.Input((units, )) for _ in range(num_states)
            ]
            layer = keras.layers.LSTM(units)
            # A single state may be passed bare rather than in a list.
            state_arg = (state_inputs[0]
                         if len(state_inputs) == 1 else state_inputs)
            output = layer(seq_input, initial_state=state_arg)
            assert state_inputs[0] in layer.inbound_nodes[0].input_tensors

            model = keras.models.Model([seq_input] + state_inputs, output)
            model.compile(loss='categorical_crossentropy', optimizer='adam')

            # One training step on random data to exercise the graph.
            x = np.random.random((num_samples, timesteps, embedding_dim))
            state_vals = [
                np.random.random((num_samples, units))
                for _ in range(num_states)
            ]
            y = np.random.random((num_samples, units))
            model.train_on_batch([x] + state_vals, y)
Example #2
0
    def test_initial_states_as_other_inputs(self):
        """Initial states may be appended to the layer's input list."""
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2
        num_states = 2
        layer_class = keras.layers.LSTM

        with self.test_session():
            # Test with Keras tensor: the states ride along in the same
            # list as the main sequence input.
            seq_input = keras.Input((timesteps, embedding_dim))
            state_inputs = [
                keras.Input((units, )) for _ in range(num_states)
            ]
            layer = layer_class(units)
            output = layer([seq_input] + state_inputs)
            assert state_inputs[0] in layer.inbound_nodes[0].input_tensors

            model = keras.models.Model([seq_input] + state_inputs, output)
            model.compile(loss='categorical_crossentropy', optimizer='adam')

            # One training step on random data.
            x = np.random.random((num_samples, timesteps, embedding_dim))
            state_vals = [
                np.random.random((num_samples, units))
                for _ in range(num_states)
            ]
            y = np.random.random((num_samples, units))
            model.train_on_batch([x] + state_vals, y)
Example #3
0
    def test_layer_sharing_at_heterogenous_depth_with_concat(self):
        """Shared layers at different graph depths survive a config round-trip."""
        with self.test_session():
            input_shape = (16, 9, 3)
            input_layer = keras.Input(shape=input_shape)

            dense_a = keras.layers.Dense(3, name='dense_A')
            dense_b = keras.layers.Dense(3, name='dense_B')
            dense_c = keras.layers.Dense(3, name='dense_C')

            # dense_A is used at depth 1 in one branch and depth 2 in the other.
            branch_1 = dense_b(dense_a(input_layer))
            branch_2 = dense_a(dense_c(input_layer))
            output = keras.layers.concatenate([branch_1, branch_2])

            model = keras.models.Model(inputs=input_layer, outputs=output)

            x_val = np.random.random((10, 16, 9, 3))
            ref_out = model.predict(x_val)

            # Rebuild from config + weights; predictions must match.
            clone = keras.models.Model.from_config(model.get_config())
            clone.set_weights(model.get_weights())
            self.assertAllClose(ref_out, clone.predict(x_val), atol=1e-6)
Example #4
0
 def test_is_keras_tensor(self):
   """is_keras_tensor: False for backend variables, True for Inputs."""
   backend_var = keras.backend.variable(1)
   self.assertEqual(keras.backend.is_keras_tensor(backend_var), False)
   keras_tensor = keras.Input(shape=(1,))
   self.assertEqual(keras.backend.is_keras_tensor(keras_tensor), True)
   # Non-tensor arguments are rejected outright.
   with self.assertRaises(ValueError):
     keras.backend.is_keras_tensor(0)
Example #5
0
    def test_clone_sequential_model(self):
        """clone_model handles Sequential models, including across sessions."""
        with self.test_session():
            val_a = np.random.random((10, 4))
            val_out = np.random.random((10, 4))

            model = keras.models.Sequential()
            model.add(keras.layers.Dense(4, input_shape=(4, )))
            model.add(keras.layers.Dropout(0.5))
            model.add(keras.layers.Dense(4))

        # Everything should work in a new session.
        keras.backend.clear_session()

        with self.test_session():
            # Clone with fresh placeholders.
            clone = keras.models.clone_model(model)
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch(val_a, val_out)

            # Clone on top of a new Keras tensor.
            new_input = keras.Input(shape=(4, ))
            clone = keras.models.clone_model(model, input_tensors=new_input)
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch(val_a, val_out)

            # Clone on top of a non-Keras backend variable; data comes from
            # the variable itself, so x is None during training.
            var_input = keras.backend.variable(val_a)
            clone = keras.models.clone_model(model, input_tensors=var_input)
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch(None, val_out)
Example #6
0
    def test_conv_lstm(self):
        """ConvLSTM2D: verify returned states and output shape for both
        data formats and both return_sequences settings."""
        num_row = 3
        num_col = 3
        filters = 2
        num_samples = 1
        input_channel = 2
        input_num_row = 5
        input_num_col = 5
        sequence_len = 2
        for data_format in ['channels_first', 'channels_last']:
            # Random 5D input laid out to match the chosen data format.
            if data_format == 'channels_first':
                inputs = np.random.rand(num_samples, sequence_len,
                                        input_channel, input_num_row,
                                        input_num_col)
            else:
                inputs = np.random.rand(num_samples, sequence_len,
                                        input_num_row, input_num_col,
                                        input_channel)

            for return_sequences in [True, False]:
                with self.test_session():
                    # test for return state:
                    x = keras.Input(batch_shape=inputs.shape)
                    kwargs = {
                        'data_format': data_format,
                        'return_sequences': return_sequences,
                        'return_state': True,
                        'stateful': True,
                        'filters': filters,
                        'kernel_size': (num_row, num_col),
                        'padding': 'valid'
                    }
                    layer = keras.layers.ConvLSTM2D(**kwargs)
                    layer.build(inputs.shape)
                    outputs = layer(x)
                    # With return_state=True the call yields
                    # [output, state_h, state_c]; keep only the states.
                    _, states = outputs[0], outputs[1:]
                    self.assertEqual(len(states), 2)
                    model = keras.models.Model(x, states[0])
                    state = model.predict(inputs)
                    # The stateful layer's stored state must match the
                    # state tensor the model predicted.
                    self.assertAllClose(keras.backend.eval(layer.states[0]),
                                        state,
                                        atol=1e-4)

                    # test for output shape:
                    testing_utils.layer_test(keras.layers.ConvLSTM2D,
                                             kwargs={
                                                 'data_format': data_format,
                                                 'return_sequences':
                                                 return_sequences,
                                                 'filters': filters,
                                                 'kernel_size':
                                                 (num_row, num_col),
                                                 'padding': 'valid'
                                             },
                                             input_shape=inputs.shape)
Example #7
0
    def test_clone_functional_model(self):
        """clone_model handles functional models, including across sessions."""
        with self.test_session():
            val_a = np.random.random((10, 4))
            val_b = np.random.random((10, 4))
            val_out = np.random.random((10, 4))

            input_a = keras.Input(shape=(4, ))
            input_b = keras.Input(shape=(4, ))
            dense_1 = keras.layers.Dense(4, )
            dense_2 = keras.layers.Dense(4, )

            # dense_1 is shared between both branches.
            x_a = dense_1(input_a)
            x_a = keras.layers.Dropout(0.5)(x_a)
            x_b = dense_1(input_b)
            x_a = dense_2(x_a)
            outputs = keras.layers.add([x_a, x_b])
            model = keras.models.Model([input_a, input_b], outputs)

        # Everything should work in a new session.
        keras.backend.clear_session()

        with self.test_session():
            # Clone with fresh placeholders.
            clone = keras.models.clone_model(model)
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch([val_a, val_b], val_out)

            # Clone on top of new Keras tensors.
            new_a = keras.Input(shape=(4, ), name='a')
            new_b = keras.Input(shape=(4, ), name='b')
            clone = keras.models.clone_model(
                model, input_tensors=[new_a, new_b])
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch([val_a, val_b], val_out)

            # Clone on top of backend variables; inputs are fed through the
            # variables themselves, so x is None during training.
            var_a = keras.backend.variable(val_a)
            var_b = keras.backend.variable(val_b)
            clone = keras.models.clone_model(
                model, input_tensors=[var_a, var_b])
            clone.compile('rmsprop', 'mse')
            clone.train_on_batch(None, val_out)
Example #8
0
File: lstm_test.py  Project: lengjia/RRL
    def test_specify_state_with_masking(self):
        """`initial_state` works alongside a Masking node in the graph."""
        num_states = 2
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2

        with self.test_session():
            seq_input = keras.Input((timesteps, embedding_dim))
            # The masked tensor itself is unused; this only places a
            # Masking node in the graph.
            _ = keras.layers.Masking()(seq_input)
            state_inputs = [
                keras.Input((units, )) for _ in range(num_states)
            ]
            output = keras.layers.LSTM(units)(
                seq_input, initial_state=state_inputs)

            model = keras.models.Model([seq_input] + state_inputs, output)
            model.compile(loss='categorical_crossentropy', optimizer='adam')

            # One training step on random data.
            x = np.random.random((num_samples, timesteps, embedding_dim))
            state_vals = [
                np.random.random((num_samples, units))
                for _ in range(num_states)
            ]
            y = np.random.random((num_samples, units))
            model.train_on_batch([x] + state_vals, y)
Example #9
0
File: lstm_test.py  Project: lengjia/RRL
    def test_state_reuse(self):
        """States returned by one LSTM can seed a second LSTM."""
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2

        with self.test_session():
            seq_input = keras.Input(batch_shape=(num_samples, timesteps,
                                                 embedding_dim))
            first_lstm = keras.layers.LSTM(units,
                                           return_state=True,
                                           return_sequences=True)
            results = first_lstm(seq_input)
            # First entry is the sequence output; the rest are the states.
            sequence, states = results[0], results[1:]
            final = keras.layers.LSTM(units)(sequence, initial_state=states)
            model = keras.models.Model(seq_input, final)

            x = np.random.random((num_samples, timesteps, embedding_dim))
            model.predict(x)
Example #10
0
    def test_layer_sharing_at_heterogenous_depth(self):
        """A-B-A-B shared-layer chain survives a config round-trip."""
        with self.test_session():
            x_val = np.random.random((10, 5))

            inp = keras.Input(shape=(5, ))
            layer_a = keras.layers.Dense(5, name='A')
            layer_b = keras.layers.Dense(5, name='B')
            # Each shared layer appears at two different depths.
            out = layer_a(layer_b(layer_a(layer_b(inp))))
            model = keras.models.Model(inp, out)

            ref_out = model.predict(x_val)

            # Rebuild from config + weights; predictions must match.
            clone = keras.models.Model.from_config(model.get_config())
            clone.set_weights(model.get_weights())
            self.assertAllClose(ref_out, clone.predict(x_val), atol=1e-6)
Example #11
0
    def test_model_cloning_invalid_use_cases(self):
        """Cloning helpers reject wrong model kinds and bad input tensors."""
        seq_model = keras.models.Sequential()
        seq_model.add(keras.layers.Dense(4, input_shape=(4, )))

        inp = keras.Input((4, ))
        out = keras.layers.Dense(4)(inp)
        fn_model = keras.models.Model(inp, out)

        # Each helper accepts only its own model type.
        with self.assertRaises(ValueError):
            keras.models._clone_functional_model(seq_model)
        with self.assertRaises(ValueError):
            keras.models._clone_functional_model(None)
        with self.assertRaises(ValueError):
            keras.models._clone_sequential_model(fn_model)

        # Wrong number of input tensors, then a non-Input tensor.
        with self.assertRaises(ValueError):
            keras.models._clone_sequential_model(seq_model,
                                                 input_tensors=[inp, inp])
        with self.assertRaises(ValueError):
            keras.models._clone_sequential_model(seq_model, input_tensors=out)
Example #12
0
File: lstm_test.py  Project: lengjia/RRL
    def test_return_state(self):
        """return_state=True yields num_states extra state outputs."""
        num_states = 2
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2

        with self.test_session():
            seq_input = keras.Input(batch_shape=(num_samples, timesteps,
                                                 embedding_dim))
            layer = keras.layers.LSTM(units, return_state=True, stateful=True)
            results = layer(seq_input)
            states = results[1:]
            assert len(states) == num_states
            model = keras.models.Model(seq_input, states[0])

            x = np.random.random((num_samples, timesteps, embedding_dim))
            predicted = model.predict(x)
            # The stateful layer's stored state must equal the predicted one.
            self.assertAllClose(keras.backend.eval(layer.states[0]),
                                predicted,
                                atol=1e-4)
Example #13
0
File: lstm_test.py  Project: lengjia/RRL
    def test_specify_initial_state_non_keras_tensor(self):
        """`initial_state` also accepts plain backend variables."""
        num_states = 2
        timesteps = 3
        embedding_dim = 4
        units = 3
        num_samples = 2

        with self.test_session():
            # Test with non-Keras tensor: backend variables are baked into
            # the graph rather than fed as model inputs.
            seq_input = keras.Input((timesteps, embedding_dim))
            state_vars = [
                keras.backend.random_normal_variable(
                    (num_samples, units), 0, 1) for _ in range(num_states)
            ]
            layer = keras.layers.LSTM(units)
            output = layer(seq_input, initial_state=state_vars)

            model = keras.models.Model(seq_input, output)
            model.compile(loss='categorical_crossentropy', optimizer='adam')

            # One training step; only the sequence input is fed.
            x = np.random.random((num_samples, timesteps, embedding_dim))
            y = np.random.random((num_samples, units))
            model.train_on_batch(x, y)
Example #14
0
    def test_TensorBoard_multi_input_output(self):
        """TensorBoard callback works with multi-input/multi-output models,
        for fit and fit_generator, with and without histograms."""
        np.random.seed(1337)
        tmpdir = self.get_temp_dir()
        self.addCleanup(shutil.rmtree, tmpdir)

        with self.test_session():
            filepath = os.path.join(tmpdir, 'logs')

            (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
                train_samples=TRAIN_SAMPLES,
                test_samples=TEST_SAMPLES,
                input_shape=(INPUT_DIM, ),
                num_classes=NUM_CLASSES)
            y_test = keras.utils.to_categorical(y_test)
            y_train = keras.utils.to_categorical(y_train)

            def data_generator(train):
                # Yields each batch duplicated into a 2-element list to
                # mimic a model with two inputs and two outputs.
                if train:
                    max_batch_index = len(x_train) // BATCH_SIZE
                else:
                    max_batch_index = len(x_test) // BATCH_SIZE
                i = 0
                while 1:
                    if train:
                        # simulate multi-input/output models
                        yield ([x_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_train[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    else:
                        yield ([x_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2,
                               [y_test[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]] *
                               2)
                    i += 1
                    i %= max_batch_index

            # Two-input / two-output functional model.
            inp1 = keras.Input((INPUT_DIM, ))
            inp2 = keras.Input((INPUT_DIM, ))
            inp = keras.layers.add([inp1, inp2])
            hidden = keras.layers.Dense(2, activation='relu')(inp)
            hidden = keras.layers.Dropout(0.1)(hidden)
            output1 = keras.layers.Dense(NUM_CLASSES,
                                         activation='softmax')(hidden)
            output2 = keras.layers.Dense(NUM_CLASSES,
                                         activation='softmax')(hidden)
            model = keras.models.Model([inp1, inp2], [output1, output2])
            model.compile(loss='categorical_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])

            # we must generate new callbacks for each test, as they aren't stateless
            def callbacks_factory(histogram_freq):
                return [
                    keras.callbacks.TensorBoard(
                        log_dir=filepath,
                        histogram_freq=histogram_freq,
                        write_images=True,
                        write_grads=True,
                        embeddings_freq=1,
                        embeddings_layer_names=['dense_1'],
                        batch_size=5)
                ]

            # fit without validation data
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      callbacks=callbacks_factory(histogram_freq=0),
                      epochs=3)

            # fit with validation data and accuracy
            model.fit([x_train] * 2, [y_train] * 2,
                      batch_size=BATCH_SIZE,
                      validation_data=([x_test] * 2, [y_test] * 2),
                      callbacks=callbacks_factory(histogram_freq=1),
                      epochs=2)

            # fit generator without validation data
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                callbacks=callbacks_factory(histogram_freq=0))

            # fit generator with validation data and accuracy
            model.fit_generator(data_generator(True),
                                len(x_train),
                                epochs=2,
                                validation_data=([x_test] * 2, [y_test] * 2),
                                callbacks=callbacks_factory(histogram_freq=1))
            # Logs must have been written to the log directory.
            assert os.path.isdir(filepath)
Example #15
0
    def test_weight_preprocessing(self):
        """preprocess_weights_for_loading accepts Keras-1-style weight lists
        for a variety of layer types without raising."""
        input_dim = 3
        output_dim = 3
        size = 2
        # Each case: (layer, legacy weight list, input shape to build with).
        # Weight shapes follow the Keras-1 conventions the preprocessing
        # routine is expected to convert.
        cases = [
            [
                (keras.layers.Bidirectional(keras.layers.SimpleRNN(2))),
                [np.random.random((2, 1)),
                 np.random.random((2, 1))],
                (None, 3, 2),
            ],
            [
                (keras.layers.TimeDistributed(keras.layers.Dense(1))),
                [np.random.random((2, 1)),
                 np.random.random((1, ))],
                (None, 3, 2),
            ],
            [
                (keras.layers.Conv1D(output_dim, size, use_bias=False)),
                [np.random.random((output_dim, input_dim, size, 1))],
                (None, 4, input_dim),
            ],
            [
                (keras.layers.Conv2D(output_dim,
                                     size,
                                     use_bias=False,
                                     data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size))],
                (None, input_dim, 4, 4),
            ],
            [
                (keras.layers.Conv2DTranspose(output_dim,
                                              size,
                                              use_bias=False,
                                              data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size))],
                (None, input_dim, 4, 4),
            ],
            [
                (keras.layers.Conv2DTranspose(output_dim,
                                              size,
                                              use_bias=False,
                                              data_format='channels_last')),
                [np.random.random((size, size, input_dim, output_dim))],
                (None, 4, 4, input_dim),
            ],
            [
                (keras.layers.Conv3D(output_dim,
                                     size,
                                     use_bias=False,
                                     data_format='channels_first')),
                [np.random.random((output_dim, input_dim, size, size, size))],
                (None, input_dim, 4, 4, 4),
            ],
            [
                # GRU: 3 gates x (kernel, recurrent kernel, bias).
                (keras.layers.GRU(output_dim)),
                [
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, ))
                ],
                (None, 4, input_dim),
            ],
            [
                # LSTM: 4 gates x (kernel, recurrent kernel, bias).
                (keras.layers.LSTM(output_dim)),
                [
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, )),
                    np.random.random((input_dim, output_dim)),
                    np.random.random((output_dim, output_dim)),
                    np.random.random((output_dim, ))
                ],
                (None, 4, input_dim),
            ],
        ]
        for layer, weights, input_shape in cases:
            layer.build(input_shape)
            _ = keras.engine.topology.preprocess_weights_for_loading(
                layer, weights, original_keras_version='1')

        # Whole-model preprocessing: Sequential...
        model = keras.models.Sequential([keras.layers.Dense(2, input_dim=2)])
        _ = keras.engine.topology.preprocess_weights_for_loading(
            model, model.weights, original_keras_version='1')

        # ...and functional.
        x = keras.Input((2, ))
        y = keras.layers.Dense(2)(x)
        model = keras.models.Model(x, y)
        _ = keras.engine.topology.preprocess_weights_for_loading(
            model, model.weights, original_keras_version='1')
Example #16
0
    def test_generator_methods(self):
        """fit/predict/evaluate_generator run for functional and Sequential
        models, with and without multiprocessing, plus the legacy kwargs."""
        arr_data = np.random.random((50, 2))
        arr_labels = np.random.random((50, ))

        def custom_generator():
            # Infinite generator yielding random 10-sample (x, y) batches.
            batch_size = 10
            n_samples = 50
            while True:
                batch_index = np.random.randint(0, n_samples - batch_size)
                start = batch_index
                end = start + batch_size
                x = arr_data[start:end]
                y = arr_labels[start:end]
                yield x, y

        with self.test_session():
            # One functional and one Sequential model with the same shape.
            x = keras.Input((2, ))
            y = keras.layers.Dense(1)(x)
            fn_model = keras.models.Model(x, y)
            fn_model.compile(loss='mse', optimizer='sgd')

            seq_model = keras.models.Sequential()
            seq_model.add(keras.layers.Dense(1, input_shape=(2, )))
            seq_model.compile(loss='mse', optimizer='sgd')

            for model in [fn_model, seq_model]:
                # Current API: multiprocessing on/off, with validation data.
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    workers=4,
                                    use_multiprocessing=True)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    use_multiprocessing=False)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_queue_size=10,
                                    use_multiprocessing=False,
                                    validation_data=custom_generator(),
                                    validation_steps=10)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_queue_size=10,
                                        workers=2,
                                        use_multiprocessing=True)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_queue_size=10,
                                        use_multiprocessing=False)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_queue_size=10,
                                         workers=2,
                                         use_multiprocessing=True)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_queue_size=10,
                                         use_multiprocessing=False)

                # Test legacy API
                # (max_q_size/pickle_safe are the old names for
                # max_queue_size/use_multiprocessing.)
                model.fit_generator(custom_generator(),
                                    steps_per_epoch=5,
                                    epochs=1,
                                    verbose=1,
                                    max_q_size=10,
                                    workers=4,
                                    pickle_safe=True)
                model.predict_generator(custom_generator(),
                                        steps=5,
                                        max_q_size=10,
                                        workers=2,
                                        pickle_safe=True)
                model.evaluate_generator(custom_generator(),
                                         steps=5,
                                         max_q_size=10,
                                         workers=2,
                                         pickle_safe=True)
Example #17
0
    def test_class_weight_invalid_use_case(self):
        """Invalid class_weight / sample_weight configurations raise the
        expected errors."""
        num_classes = 5
        train_samples = 1000
        test_samples = 1000
        input_dim = 5
        timesteps = 3

        with self.test_session():
            model = keras.models.Sequential()
            model.add(
                keras.layers.TimeDistributed(keras.layers.Dense(num_classes),
                                             input_shape=(timesteps,
                                                          input_dim)))
            model.add(keras.layers.Activation('softmax'))
            model.compile(loss='binary_crossentropy', optimizer='rmsprop')

            (x_train, y_train), _ = testing_utils.get_test_data(
                train_samples=train_samples,
                test_samples=test_samples,
                input_shape=(input_dim, ),
                num_classes=num_classes)
            # convert class vectors to binary class matrices
            y_train = keras.utils.to_categorical(y_train, num_classes)
            class_weight = dict([(i, 1.) for i in range(num_classes)])

            # Missing a class in class_weight must raise.
            del class_weight[1]
            with self.assertRaises(ValueError):
                model.fit(x_train,
                          y_train,
                          epochs=0,
                          verbose=0,
                          class_weight=class_weight)

            # An empty list is an invalid sample_weight_mode.
            with self.assertRaises(ValueError):
                model.compile(loss='binary_crossentropy',
                              optimizer='rmsprop',
                              sample_weight_mode=[])

            # Build multi-output model
            x = keras.Input((3, ))
            y1 = keras.layers.Dense(4, name='1')(x)
            y2 = keras.layers.Dense(4, name='2')(x)
            model = keras.models.Model(x, [y1, y2])
            model.compile(optimizer='rmsprop', loss='mse')
            x_np = np.random.random((10, 3))
            y_np = np.random.random((10, 4))
            w_np = np.random.random((10, ))
            # This will work
            model.fit(x_np, [y_np, y_np], epochs=1, sample_weight={'1': w_np})
            # These will not
            # sample_weight must be a dict keyed by output name here,
            # not a bare list or array.
            with self.assertRaises(ValueError):
                model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=[w_np])
            with self.assertRaises(TypeError):
                model.fit(x_np, [y_np, y_np], epochs=1, sample_weight=w_np)
            # Wrong length or wrong rank sample weights must also raise.
            with self.assertRaises(ValueError):
                bad_w_np = np.random.random((11, ))
                model.fit(x_np, [y_np, y_np],
                          epochs=1,
                          sample_weight={'1': bad_w_np})
            with self.assertRaises(ValueError):
                bad_w_np = np.random.random((10, 2))
                model.fit(x_np, [y_np, y_np],
                          epochs=1,
                          sample_weight={'1': bad_w_np})
            with self.assertRaises(ValueError):
                bad_w_np = np.random.random((10, 2, 2))
                model.fit(x_np, [y_np, y_np],
                          epochs=1,
                          sample_weight={'1': bad_w_np})