Code example #1
File: test_models.py Project: AI42/keras
def test_recursive():
    # test layer-like API

    graph = containers.Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)
    assert(loss < 2.5)

    loss = seq.evaluate(X_test_graph, y_test_graph, show_accuracy=True)
    seq.predict(X_test_graph)
    seq.get_config(verbose=1)
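
Example #1 targets the old Keras 0.x `containers.Graph` API, which was later removed. As a hedged sketch (not the original project's code), the same topology can be expressed with the Keras 2 functional API, assuming Keras 2.x:

from keras.layers import Input, Dense, add
from keras.models import Model

# same wiring as above: input -> dense1/dense2, dense1 -> dense3,
# output1 = dense2 + dense3, wrapped by the outer Sequential's Denses
inputs = Input(shape=(32,))
h = Dense(32)(inputs)         # outer Sequential's first Dense
d1 = Dense(16)(h)             # 'dense1'
d2 = Dense(4)(h)              # 'dense2'
d3 = Dense(4)(d1)             # 'dense3'
merged = add([d2, d3])        # merge_mode='sum'
outputs = Dense(4)(merged)    # outer Sequential's final Dense

model = Model(inputs, outputs)
model.compile('rmsprop', 'mse')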
Code example #2
File: test_graph_model.py Project: andyljones/keras
    def test_recursive(self):
        print('test layer-like API')

        graph = containers.Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')
        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

        seq = Sequential()
        seq.add(Dense(32, 32, name='first_seq_dense'))
        seq.add(graph)
        seq.add(Dense(4, 4, name='last_seq_dense'))

        seq.compile('rmsprop', 'mse')

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert(loss < 1.4)

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Code example #3
File: test_graph_model.py Project: shubham1310/keras
    def test_recursive(self):
        print("test layer-like API")

        graph = containers.Graph()
        graph.add_input(name="input1", ndim=2)
        graph.add_node(Dense(32, 16), name="dense1", input="input1")
        graph.add_node(Dense(32, 4), name="dense2", input="input1")
        graph.add_node(Dense(16, 4), name="dense3", input="dense1")
        graph.add_output(name="output1", inputs=["dense2", "dense3"], merge_mode="sum")

        seq = Sequential()
        seq.add(Dense(32, 32, name="first_seq_dense"))
        seq.add(graph)
        seq.add(Dense(4, 4, name="last_seq_dense"))

        seq.compile("rmsprop", "mse")

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert loss < 2.5

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1)
Code example #4
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Code example #5
File: wrappers_test.py Project: dansbecker/keras
def test_Bidirectional():
    rnn = layers.SimpleRNN
    samples = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    dropout_rate = 0.2
    for mode in ['sum', 'concat']:
        x = np.random.random((samples, timesteps, dim))
        target_dim = 2 * output_dim if mode == 'concat' else output_dim
        y = np.random.random((samples, target_dim))

        # test with Sequential model
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # test config
        model.get_config()
        model = model_from_json(model.to_json())
        model.summary()

        # test stacked bidirectional layers
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim,
                                             return_sequences=True),
                                         merge_mode=mode,
                                         input_shape=(timesteps, dim)))
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # test with functional API
        inputs = Input((timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim, dropout=dropout_rate,
                                             recurrent_dropout=dropout_rate),
                                         merge_mode=mode)(inputs)
        if dropout_rate and K.backend() != 'cntk':
            # Dropout is disabled with CNTK for now.
            assert outputs._uses_learning_phase
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)

        # Bidirectional and stateful
        inputs = Input(batch_shape=(1, timesteps, dim))
        outputs = wrappers.Bidirectional(rnn(output_dim, stateful=True),
                                         merge_mode=mode)(inputs)
        model = Model(inputs, outputs)
        model.compile(loss='mse', optimizer='sgd')
        model.fit(x, y, epochs=1, batch_size=1)
Code example #6
    def test_merge_concat(self):
        print('Test merge: concat')
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict([X_test, X_test], verbose=0)
        model.predict_classes([X_test, X_test], verbose=0)
        model.predict_proba([X_test, X_test], verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_merge_concat_temp.h5'
        model.save_weights(fname, overwrite=True)
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(nb_hidden, input_shape=(input_dim,)))
        right.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, right], mode='concat'))

        model.add(Dense(nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
        assert(loss == nloss)
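
The `Merge` layer used in this example was removed in Keras 2. A minimal sketch of the same two-branch concat model in the functional API, assuming the surrounding test's `input_dim`, `nb_hidden`, `nb_class` and data variables:

from keras.layers import Input, Dense, concatenate
from keras.models import Model

left_in = Input(shape=(input_dim,))
right_in = Input(shape=(input_dim,))
left_out = Dense(nb_hidden, activation='relu')(left_in)
right_out = Dense(nb_hidden, activation='relu')(right_in)
merged = concatenate([left_out, right_out])   # replaces mode='concat'
preds = Dense(nb_class, activation='softmax')(merged)

model = Model([left_in, right_in], preds)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit([X_train, X_train], y_train, batch_size=batch_size,
          epochs=nb_epoch, verbose=0)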
Code example #7
File: test_models.py Project: AI42/keras
def test_siamese_1():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
Code example #8
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), nb_epoch=1, batch_size=10)

    # test config
    model.get_config()

    # compare to TimeDistributedDense
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(core.TimeDistributedDense(2, input_shape=(3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test when specifying a batch_input_shape
    reference = Sequential()
    reference.add(core.TimeDistributedDense(2, batch_input_shape=(1, 3, 4), weights=weights))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Convolution2D
    model = Sequential()
    model.add(wrappers.TimeDistributed(convolutional.Convolution2D(5, 2, 2, border_mode='same'), input_shape=(2, 3, 4, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 3, 4, 4)), np.random.random((1, 2, 5, 4, 4)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), nb_epoch=1, batch_size=10)
Code example #9
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Code example #10
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Code example #11
def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    config = model.get_config()
    model = Sequential.from_config(config)
Code example #12
File: test_vector_data_tasks.py Project: 5ke/keras
def test_vector_classification():
    '''
    Classify random float vectors into 2 classes with logistic regression
    using a 2-layer neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=(20,),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=15, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['val_acc'][-1] > 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
Code example #13
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Code example #14
def gen_model(vocab_size=100, embedding_size=128, maxlen=100, output_size=6, hidden_layer_size=100, num_hidden_layers=1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %\
            (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Model")
    model = Sequential()
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add(Embedding(vocab_size, embedding_size, input_length=maxlen))
    logger.info("Added Embedding Layer")
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    for i in xrange(num_hidden_layers):
        model.add(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
        logger.info("Added %s Layer" % RNN_LAYER_TYPE)
        model.add(Dropout(0.5))
        logger.info("Added Dropout Layer")
    model.add(RNN_CLASS(output_dim=output_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    logger.info("Added %s Layer" % RNN_LAYER_TYPE)
    model.add(Dropout(0.5))
    logger.info("Added Dropout Layer")
    model.add(TimeDistributedDense(output_size, activation="softmax"))
    logger.info("Added TimeDistributedDense Layer")
    logger.info("Created model with the following config:\n%s" % json.dumps(model.get_config(), indent=4))
    # `optimizer` is assumed to be defined at module level
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(loss='categorical_crossentropy', optimizer=optimizer)
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model
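
A hypothetical call sketch for `gen_model` above; the function reads `logger` and `optimizer` from module scope, so stand-ins are defined first:

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)    # global read inside gen_model
optimizer = 'rmsprop'                   # global read inside gen_model

model = gen_model(vocab_size=5000, embedding_size=64, maxlen=50,
                  output_size=6, RNN_LAYER_TYPE='GRU')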
Code example #15
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers of length 3 into
    2 classes, using a single layer of GRU units and a softmax
    applied to the units' last activations.
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(layers.GRU(8,
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['acc'][-1] >= 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
Code example #16
File: test_sequential_model.py Project: 5ke/keras
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Code example #17
File: test_wrappers.py Project: awentzonline/keras
def test_Bidirectional():
    rnn = recurrent.SimpleRNN
    nb_sample = 2
    dim = 2
    timesteps = 2
    output_dim = 2
    for mode in ["sum", "concat"]:
        x = np.random.random((nb_sample, timesteps, dim))
        target_dim = 2 * output_dim if mode == "concat" else output_dim
        y = np.random.random((nb_sample, target_dim))

        # test with Sequential model
        model = Sequential()
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode, input_shape=(timesteps, dim)))
        model.compile(loss="mse", optimizer="sgd")
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # test config
        model.get_config()
        model = model_from_json(model.to_json())
        model.summary()

        # test stacked bidirectional layers
        model = Sequential()
        model.add(
            wrappers.Bidirectional(
                rnn(output_dim, return_sequences=True), merge_mode=mode, input_shape=(timesteps, dim)
            )
        )
        model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
        model.compile(loss="mse", optimizer="sgd")
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # test with functional API
        input = Input((timesteps, dim))
        output = wrappers.Bidirectional(rnn(output_dim), merge_mode=mode)(input)
        model = Model(input, output)
        model.compile(loss="mse", optimizer="sgd")
        model.fit(x, y, nb_epoch=1, batch_size=1)

        # Bidirectional and stateful
        input = Input(batch_shape=(1, timesteps, dim))
        output = wrappers.Bidirectional(rnn(output_dim, stateful=True), merge_mode=mode)(input)
        model = Model(input, output)
        model.compile(loss="mse", optimizer="sgd")
        model.fit(x, y, nb_epoch=1, batch_size=1)
Code example #18
def test_merge_recursivity():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    righter = Sequential()
    righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
    righter.add(Activation('relu'))

    intermediate = Sequential()
    intermediate.add(Merge([left, right], mode='sum'))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([intermediate, righter], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
    assert(loss < 0.7)

    model.predict([X_test, X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_recursivity_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
    assert(loss == nloss)
Code example #19
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Code example #20
def test_merge_concat():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential(name="branch_1")
    left.add(Dense(nb_hidden, input_shape=(input_dim,), name="dense_1"))
    left.add(Activation("relu", name="relu_1"))

    right = Sequential(name="branch_2")
    right.add(Dense(nb_hidden, input_shape=(input_dim,), name="dense_2"))
    right.add(Activation("relu", name="relu_2"))

    model = Sequential(name="merged_branches")
    model.add(Merge([left, right], mode="concat", name="merge"))
    model.add(Dense(nb_class, name="final_dense"))
    model.add(Activation("softmax", name="softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(
        [X_train, X_train],
        y_train,
        batch_size=batch_size,
        nb_epoch=nb_epoch,
        verbose=0,
        validation_data=([X_test, X_test], y_test),
    )
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config()

    fname = "test_merge_concat_temp.h5"
    model.save_weights(fname, overwrite=True)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert loss == nloss
Code example #21
File: nn.py Project: marek1840/Eksploracja
def create_network_model(input_size, out_size, model_path=None):
    # print input_size, out_size
    model = Sequential()
    h1 = 2048
    h2 = 1024
    # model.add(Dense(input_size, out_size, activation='softmax'))
    model.add(Dense(int(input_size), h1, activation='tanh'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(h1, h2, activation='tanh'))
    model.add(Dropout(0.2))
    model.add(Dense(h2, out_size, activation='softmax'))
    if model_path is not None and os.path.isfile(model_path):
        model.load_weights(model_path)
    sgd = SGD(lr=0.1, momentum=0.9, decay=1.e-5, nesterov=True)
    # model.compile(loss='mse', optimizer='rmsprop')
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    # model.compile(loss=gps_loss, optimizer=sgd)
    print(model.get_config(verbose=0))

    return model
Code example #22
class AE:
    def __init__(self, X_train, input_dim=100):
        self.X_train = X_train[:, np.newaxis, :]
        print("X_train: ", X_train.shape)
        self.input_dim = input_dim
        self.ae = Sequential()
        self.ae = self.build_lstm_autoencoder(self.ae)
        self.ae.compile(loss='mean_squared_error', optimizer='adam')

    def build(self):
        # Do NOT use validation data with output_reconstruction=True
        # `batch_size` and `nb_epoch` are assumed module-level globals
        np.random.seed(0)
        self.ae.fit(self.X_train, self.X_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1)
        prefiler_Xtrain = self.ae.predict(self.X_train, verbose=1)
        print("Modified X_train: ", prefiler_Xtrain.shape)

    def build_lstm_autoencoder(self,autoencoder):
        # The TimeDistributedDense isn't really necessary, however you need a lot of GPU memory to do 784x394-394x784
        autoencoder.add(TimeDistributedDense(input_dim=self.input_dim,
                                         output_dim=self.input_dim))

        autoencoder.add(AutoEncoder(encoder=LSTM(input_dim=self.input_dim,
                                             output_dim=50,
                                             activation='tanh',
                                             return_sequences=True),
                                decoder=LSTM(input_dim=50,
                                             output_dim=self.input_dim,
                                             activation='tanh',
                                             return_sequences=True),
                                output_reconstruction=False))
        return autoencoder
    
    
    def configure(self):
        return self.ae.get_config(verbose=True)

    def get_feature(self, X_test):
        X_test = X_test[:, np.newaxis, :]
        print("Modified X_test:", X_test.shape)
        pred_test = self.ae.predict(X_test, verbose=1)
        print("X_test: ", pred_test.shape)
        return pred_test

    def save(self, filepath='LSTM_AE'):
        self.ae.save_weights(filepath)

    def load(self, filepath='LSTM_AE'):
        self.ae.load_weights(filepath)
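
A hypothetical usage sketch for the AE class above, assuming the legacy Keras 0.x API it targets; `batch_size` and `nb_epoch` are module-level globals read by `build()`, so stand-ins are defined first:

import numpy as np

batch_size = 32   # globals read inside AE.build
nb_epoch = 5

X = np.random.random((100, 100)).astype('float32')
ae = AE(X, input_dim=100)      # reshapes X to (samples, 1, features)
ae.build()                     # trains the LSTM autoencoder on X
features = ae.get_feature(X)   # runs new data through the trained model
ae.save('LSTM_AE')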
Code example #23
File: test_sequential_model.py Project: 3dconv/keras
    def test_merge_recursivity(self):
        print('Test merge recursivity')

        left = Sequential()
        left.add(Dense(input_dim, nb_hidden))
        left.add(Activation('relu'))

        right = Sequential()
        right.add(Dense(input_dim, nb_hidden))
        right.add(Activation('relu'))

        righter = Sequential()
        righter.add(Dense(input_dim, nb_hidden))
        righter.add(Activation('relu'))

        intermediate = Sequential()
        intermediate.add(Merge([left, right], mode='sum'))
        intermediate.add(Dense(nb_hidden, nb_hidden))
        intermediate.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([intermediate, righter], mode='sum'))

        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test, X_test], y_test))
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

        loss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.6:
            raise Exception('Score too low, learning issue.')
        preds = model.predict([X_test, X_test, X_test], verbose=0)
        classes = model.predict_classes([X_test, X_test, X_test], verbose=0)
        probas = model.predict_proba([X_test, X_test, X_test], verbose=0)
        print(model.get_config(verbose=1))

        model.save_weights('temp.h5', overwrite=True)
        model.load_weights('temp.h5')

        nloss = model.evaluate([X_train, X_train, X_train], y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Code example #24
File: lstm.py Project: marek1840/Eksploracja
def create_network_model(input_size, out_size, model_path=None):
    model = Sequential()
    h1 = 1024
    h2 = 512
    h3 = 512
    model.add(LSTM(int(input_size), h1, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(Dense(h1, h2))
    # changes according to used dataset
    # model.add(Activation('tanh'))
    model.add(LSTM(h2, 128))
    model.add(Dropout(0.2))
    model.add(Dense(128, out_size))
    model.add(Activation('tanh'))
    if model_path is not None and os.path.isfile(model_path):
        model.load_weights(model_path)
    sgd = SGD(lr=0.1, momentum=0.9, decay=1.e-5, nesterov=True)
    # model.compile(loss='mse', optimizer='rmsprop')
    model.compile(loss='mse', optimizer=sgd)
    # model.compile(loss=gps_loss, optimizer=sgd)
    model.get_config(verbose=0)

    return model
Code example #25
def test_merge_concat():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential(name='branch_1')
    left.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_1'))
    left.add(Activation('relu', name='relu_1'))

    right = Sequential(name='branch_2')
    right.add(Dense(nb_hidden, input_shape=(input_dim,), name='dense_2'))
    right.add(Activation('relu', name='relu_2'))

    model = Sequential(name='merged_branches')
    model.add(Merge([left, right], mode='concat', name='merge'))
    model.add(Dense(nb_class, name='final_dense'))
    model.add(Activation('softmax', name='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config()

    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
Code example #26
    def test_merge_overlap(self):
        print('Test merge overlap')
        left = Sequential()
        left.add(Dense(nb_hidden, input_shape=(input_dim,)))
        left.add(Activation('relu'))

        model = Sequential()
        model.add(Merge([left, left], mode='sum'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        model.save_weights('test_merge_overlap_temp.h5', overwrite=True)
        model.load_weights('test_merge_overlap_temp.h5')

        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
        assert(loss == nloss)
Code example #27
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Code example #28
        k += 1
        kernel[h, w] = k % 5 - 2

k = 0
for h in range(0, 4):
    for w in range(0, 4):
        k += 1
        rec_kernel[h, w] = k % 5 - 2

parameters = [kernel, rec_kernel, bias]

model.set_weights(parameters)

l = 0
for b in range(0, 4):
    for h in range(0, 3):
        for c in range(0, 3):
            l += 1
            data[b, h, c] = l % 5 + 1

output = model.predict(
    data, batch_size=4)  # the batch_size has no impact on the result here

print(output)

print(model.summary())

print(model.get_config())

print(model.get_weights())
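
The fragment above begins mid-snippet: the model definition and the first loop header are missing. From the shapes it uses (a 4x4 recurrent kernel, a bias, input data of shape (4, 3, 3), and weights set as [kernel, rec_kernel, bias]), a plausible reconstruction of the missing setup is a 4-unit SimpleRNN over 3 timesteps of 3 features; this is an inference, not the original code:

import numpy as np
from keras.models import Sequential
from keras.layers import SimpleRNN

# inferred setup: kernel (3, 4), recurrent kernel (4, 4), bias (4,)
model = Sequential()
model.add(SimpleRNN(4, input_shape=(3, 3)))

kernel = np.zeros((3, 4))
rec_kernel = np.zeros((4, 4))
bias = np.zeros((4,))
data = np.zeros((4, 3, 3))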
Code example #29
File: check_yaml.py Project: Libardo1/ml
batch_size = 32

(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features,
                                                      test_split=0.2)

X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, 128))
model.add(Dropout(0.5))
model.add(Dense(128, 1, W_regularizer='identity', b_constraint='maxnorm'))
model.add(Activation('sigmoid'))

model.get_config(verbose=1)

#####################################
# save model w/o parameters to yaml #
#####################################

yaml_no_params = model.to_yaml()

no_param_model = model_from_yaml(yaml_no_params)
no_param_model.get_config(verbose=1)

######################################
# save multi-branch sequential model #
######################################

seq = Sequential()
Code example #30
File: wrappers_test.py Project: zhliaoli/keras
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), epochs=1, batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(wrappers.TimeDistributed(core.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(core.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(wrappers.TimeDistributed(embeddings.Embedding(5, 6), batch_input_shape=(10, 3, 4), dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'), np.random.random((10, 3, 4, 6)), epochs=1, batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(wrappers.TimeDistributed(embeddings.Embedding(5, 6), input_shape=(3, 4), dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(wrappers.TimeDistributed(convolutional.Conv2D(5, (2, 2), padding='same'), input_shape=(2, 4, 4, 3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)), np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(core.Dense(3)))
    model.add(core.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), epochs=1, batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(core.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)), epochs=1, batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)), epochs=1, batch_size=10)
Code example #31
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train, Y_train = shuffle_in_unison_inplace(np.array(train_i),
                                                 np.array(train_o))

    model = Sequential()
    model.add(
        Convolution2D(64,
                      11,
                      11,
                      subsample=(2, 2),
                      input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)

    datagen.fit(X_train)

    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size,shuffle=True),
                    samples_per_epoch=len(X_train), nb_epoch=5,verbose=1,validation_data=(test_i[0], test_o[0]))

    """

    model.fit(X_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=True,
              validation_data=(test_i[0], test_o[0]))

    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()

    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,
        classes_tensor=labels_tensor,
        scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Code example #32
File: vgg16_model.py Project: al13mi/Master-thesis
def vgg16_model(img_rows,
                img_cols,
                channel=1,
                num_classes=None,
                weights='imagenet'):

    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=(channel, img_rows, img_cols)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Conv2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

    # Add Fully Connected Layer
    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1000, activation='softmax'))

    # Loads ImageNet pre-trained data
    #the weights file should be downloaded from https://github.com/fchollet/deep-learning-models/releases
    model.load_weights(
        'model_weights/vgg16_weights_th_dim_ordering_th_kernels.h5')

    # Truncate and replace softmax layer for transfer learning
    model.layers.pop()
    model.outputs = [model.layers[-1].output]
    model.layers[-1].outbound_nodes = []
    model.add(Dense(num_classes, activation='softmax'))

    # metrics for top-3 accuracy
    from keras.metrics import top_k_categorical_accuracy

    def top_3_categorical_accuracy(y_true, y_pred):
        return top_k_categorical_accuracy(y_true, y_pred, k=3)
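    # note: a model saved after compiling with this custom metric must be
    # reloaded with keras.models.load_model(path,
    #     custom_objects={'top_3_categorical_accuracy': top_3_categorical_accuracy})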

    # compile the model with stochastic gradient descent
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', top_3_categorical_accuracy])

    # Model overview
    model.summary()
    model.get_config()

    return model
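# Usage sketch (not part of the original file): the weight file above uses
# Theano dim ordering, so pass channel=3 with the canonical 224x224 VGG16
# input size; num_classes=10 is a placeholder for the transfer task at hand.
model = vgg16_model(img_rows=224, img_cols=224, channel=3, num_classes=10)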
Code example #33
print('one specified layer', one.input, one.output)
print('model.summary()', model.summary())
# Compile the model
model.compile(optimizer='adadelta',
              loss='categorical_crossentropy',
              metrics=['accuracy', 'mse'])
print('conv1_1', conv1_1.input, conv1_1.output, conv1_1.name)
print('conv1_2', conv1_2.input, conv1_2.output, conv1_2.name)
print('maxpool1', maxpool1.input, maxpool1.output, maxpool1.name)
print('compile model.summary()', model.summary())
# Train the model
# model.fit(x_train, y_train, batch_size=batch_size, epochs=epoches, verbose=2, validation_data=(x_test, y_test))
# model.fit_generator()
# model.save_weights('weight.h5')
# Evaluate the model
config = model.get_config()
w = model.get_weights()
w2 = model.weights
tojson = model.to_json()
toyaml = model.to_yaml()
print('config', pd.DataFrame(config), type(config))
for i in w:
    print('w', i.shape, type(i))
print('w2', w2, type(w2))
# model.load_weights('weight.h5')  # requires the save_weights call above to have run
print('tojson', tojson)
print('toyaml', toyaml)
config1 = model.get_config()

w1 = model.get_weights()
w21 = model.weights
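# Follow-on sketch (not in the original snippet): round-tripping the
# serialized forms captured above. model_from_json / model_from_yaml are
# the standard Keras loaders for architecture-only serializations.
from keras.models import model_from_json, model_from_yaml
clone = model_from_json(tojson)   # rebuilds the architecture, no weights
clone.set_weights(w)              # restore the weights captured above
clone_yaml = model_from_yaml(toyaml)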
Code example #34
model.add(Dropout(0.5))

model.add(Dense(num_classes))
model.add(Activation('softmax'))
'''THIS'''
sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=["accuracy"])
'''OR THIS'''
#model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])

# Viewing model_configuration

model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable

#%%
'''Either This '''
# =======================================================================================
# Training
'''We already split the data with train_test_split, so use it directly.'''
if not using_callbacks_flag:
    hist = model.fit(X_train,
                     y_train,
Code example #35
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]

print("X_train: ", X_train.shape)
print("X_test: ", X_test.shape)

##########################
# dense model test       #
##########################

print("Training classical fully connected layer for classification")
model_classical = Sequential()
model_classical.add(Dense(input_dim, 10, activation=activation))
model_classical.add(Activation('softmax'))
model_classical.get_config(verbose=1)
model_classical.compile(loss='categorical_crossentropy', optimizer='adam')
model_classical.fit(X_train,
                    Y_train,
                    batch_size=batch_size,
                    nb_epoch=nb_epoch,
                    show_accuracy=False,
                    verbose=0,
                    validation_data=(X_test, Y_test))
classical_score = model_classical.evaluate(X_test,
                                           Y_test,
                                           verbose=0,
                                           show_accuracy=True)
print('\nclassical_score:', classical_score)

##########################
Code example #36
class Estimator(object):
    def __init__(self,
                 dense_layer=[512, 256],
                 lr=5e-4,
                 batch_size=128,
                 epochs=100,
                 data_gen=None,
                 summary=True):
        """
        create an estimator model
        :param dense_layer: hidden layers' dimensions, default is [512, 256]
        :param lr: learning rate of the optimizer, default is 5e-4
        :param batch_size: batch size, default is 128
        :param epochs: epochs, default is 100
        :param data_gen: conjugate Generator object, default is None
        :param summary: if shows summary of created model
        """

        now = datetime.datetime.now()
        self.date = "{}-{}-{}-{}".format(now.month, now.day, now.hour,
                                         now.minute)
        self.lr = lr
        self.batch_size = batch_size
        self.epochs = epochs
        self.dense_layer = dense_layer
        self.data_gen = data_gen
        self.word_dim = self.data_gen.model_dim
        self.loss = "categorical_crossentropy"

        self.layer = Sequential()
        self.layer.add(Flatten(input_shape=(self.word_dim, 1)))
        for i in dense_layer:
            self.layer.add(Dense(i))
            self.layer.add(BatchNormalization())
            self.layer.add(Activation('relu'))
        self.layer.add(Dense(len(data_gen.uni_cls), activation='softmax'))
        adam = Adam(lr=self.lr)
        self.layer.compile(loss=self.loss, optimizer=adam)

        if summary:
            self.layer.summary()

    def __output__(self, labels, topn=1):
        vec = np.array([
            self.data_gen.norm_model(w).reshape(self.data_gen.model_dim, 1)
            for w in labels
        ])
        preds = self.layer.predict(vec)
        for i in range(len(vec)):
            cand_ids = np.argsort(-preds[i])
            thewords = np.array(
                [self.data_gen.uni_cls[i] for i in cand_ids[0:topn]])
            yield labels[i], thewords

    def fit(self, x, y, verbose=2):
        """
        trains the created model; training can be interrupted with Ctrl-C at any time.
        :param x: input instances' word vectors
        :param y: input classes' one-hot vectors
        :param verbose: default is 2, see Keras's documentation
        :return: score, a Keras History object (None if interrupted)
        """
        try:
            score = self.layer.fit(x,
                                   y,
                                   batch_size=self.batch_size,
                                   nb_epoch=self.epochs,
                                   verbose=verbose)
            return score

        except KeyboardInterrupt:
            print("keyboard interrupted")

    def print(self, labels, topn=3):
        gen = self.__output__(labels, topn)
        for labels, pred in gen:
            print("{}, :{}".format(labels, pred))

    def write(self, labels, output_path='output', filename="output"):
        gen = self.__output__(labels)
        date = "{}-{}-{}-{}".format(self.now.month, self.now.day,
                                    self.now.hour, self.now.minute)
        path = output_path + "/{}-{}.csv".format(filename, date)

        if not os.path.exists(output_path):
            os.mkdir(output_path)
        with open(path, 'a') as f:
            f.write("\n Word2Vec model dimension: {}".format(self.word_dim))
            f.write("\n Fully Connected Layers: {}".format(self.dense_layer))
            f.write("\n Fully Connected Layers: {}".format(
                self.layer.get_config()))
            f.write("\n epochs: {}".format(self.batch_size))
            f.write("\n loss function: {}".format(self.loss))
            f.write("\n{}".format("-" * 10))
            for labels, pred in gen:
                f.write("\n{},{}".format(labels, pred))
        print("{} written".format(path))

    def save(self, directory='save_model'):
        """
        save the model structure and weights
        :param directory: path to the saving directory
        """
        self.layer.save(
            os.path.join(directory, 'weights{}.hdf5'.format(self.date)))

    def predict(self, labels, answer=None, topn=1, verbose=1):
        """
        predicts classes for input instances
        :param labels: labels of instances
        :param answer: answers for type estimation, default is None
        :param topn: number of type candidates, default is 1
        :param verbose: if 2, prints estimated types and its probabilities,
                        if 1, prints estimated types,
                        if 0, prints nothing
        :return: (count, score, score_1) when answers are supplied
        """
        vec = np.array([
            self.data_gen.norm_model(v).reshape(self.data_gen.model_dim, 1)
            for v in labels
        ])
        if verbose not in [0, 1, 2]:
            raise ValueError("verbose should be in [0, 1, 2]")

        if answer is None:
            answer = np.zeros_like(labels)
        pred_vecs = self.layer.predict(vec)

        count, score, score_1 = 0, 0, 0
        for i in range(len(vec)):
            count += 1
            cand_ids = np.argsort(-pred_vecs[i])
            pred_words = np.array(
                [self.data_gen.uni_cls[i] for i in cand_ids[0:topn]])
            if pred_words[0] == answer[i]:
                score_1 += 1
            if answer[i] in pred_words:
                score += 1
            elif verbose == 1:
                print("{} prediction{}, answer:{}".format(
                    labels[i], pred_words, answer[i]))
            elif verbose == 2:
                probs = np.array([pred_vecs[i][j] for j in cand_ids])
                w_p = [i for i in zip(pred_words, probs)]
                print("{} prediction{}, answer:{}".format(
                    labels[i], w_p, answer[i]))
        if verbose > 0:
            print("-" * 10)
            print("Categorical")
            print("using top {} candidates".format(topn))
            print("evaluation: {:.2}".format(score / count))
            print("top 1 evaluation: {}".format(score_1 / count))

        if answer is not None:
            return count, score, score_1
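# Usage sketch for the Estimator above, with a stub standing in for the
# conjugate Generator object; the interface (model_dim, uni_cls, norm_model)
# is inferred from how the class uses data_gen, not taken from the project.
import numpy as np

class StubGenerator(object):
    model_dim = 50                  # word-vector dimension
    uni_cls = ['person', 'place']   # known classes

    def norm_model(self, word):
        return np.random.rand(self.model_dim)  # fake normalized embedding

gen = StubGenerator()
est = Estimator(dense_layer=[64], epochs=1, data_gen=gen, summary=False)
x = np.array([gen.norm_model(w).reshape(gen.model_dim, 1) for w in ['a'] * 8])
y = np.eye(len(gen.uni_cls))[np.random.randint(2, size=8)]  # one-hot targets
est.fit(x, y, verbose=0)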
Code example #37
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

#sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
#model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])

# Viewing model_configuration

model.summary()
model.get_config()
model.layers[0].get_config()
model.layers[0].input_shape
model.layers[0].output_shape
model.layers[0].get_weights()
np.shape(model.layers[0].get_weights()[0])
model.layers[0].trainable

#%%
# Training
hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test))

#hist = model.fit(X_train, y_train, batch_size=32, nb_epoch=20,verbose=1, validation_split=0.2)

# Training with callbacks
from keras import callbacks
Code example #38
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim, )))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=1,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=2,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=2,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=1,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss < 0.9)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss == nloss)
Code example #39
def test_lambda():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def func(X):
        s = X[0]
        for i in range(1, len(X)):
            s += X[i]
        return s

    def activation(X):
        return K.softmax(X)

    def output_shape(input_shapes):
        return input_shapes[0]

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim, )))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim, )))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(
        LambdaMerge([left, right], function=func, output_shape=output_shape))
    model.add(Dense(nb_class))
    model.add(Lambda(activation))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=0,
              validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=0,
              validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=0,
              validation_split=0.1)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=0,
              validation_split=0.1)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0,
              shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert (loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_lambda_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim, )))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim, )))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(
        LambdaMerge([left, right], function=func, output_shape=output_shape))
    model.add(Dense(nb_class))
    model.add(Lambda(activation))
    model.load_weights(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert (loss == nloss)

    # test "join" mode in Lambda
    def difference(input_dict):
        assert (len(input_dict) == 2)
        keys = list(input_dict.keys())
        return input_dict[keys[0]] - input_dict[keys[1]]

    g = Graph()
    g.add_input(name='input_a', input_shape=(2, ))
    g.add_input(name='input_b', input_shape=(2, ))
    g.add_node(Lambda(difference),
               inputs=['input_a', 'input_b'],
               merge_mode='join',
               name='d')
    g.add_output(name='output', input='d')
    g.compile(loss={'output': 'categorical_crossentropy'}, optimizer='rmsprop')
Code example #40
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)
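        # the 'while 1' loop matters: Keras's *_generator methods expect a
        # generator that yields batches forever and control duration through
        # their steps argument rather than StopIteration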

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_split=0.1)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test),
                                         1,
                                         max_queue_size=2,
                                         verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50),
                                        1,
                                        max_queue_size=2)
    pred_loss = K.eval(
        K.mean(
            losses.get(model.loss)(K.variable(y_test),
                                   K.variable(prediction))))

    assert (np.isclose(pred_loss, loss))
    assert (np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert (loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Code example #41
def test_TimeDistributed():
    # first, test with Dense layer
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 2)),
              epochs=1,
              batch_size=10)

    # test config
    model.get_config()

    # test when specifying a batch_input_shape
    test_input = np.random.random((1, 3, 4))
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Dense(2), batch_input_shape=(1, 3, 4)))
    reference.add(layers.Activation('relu'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Embedding
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 batch_input_shape=(10, 3, 4),
                                 dtype='int32'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.randint(5, size=(10, 3, 4), dtype='int32'),
              np.random.random((10, 3, 4, 6)),
              epochs=1,
              batch_size=10)

    # compare to not using batch_input_shape
    test_input = np.random.randint(5, size=(10, 3, 4), dtype='int32')
    test_output = model.predict(test_input)
    weights = model.layers[0].get_weights()

    reference = Sequential()
    reference.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6),
                                 input_shape=(3, 4),
                                 dtype='int32'))
    reference.compile(optimizer='rmsprop', loss='mse')
    reference.layers[0].set_weights(weights)

    reference_output = reference.predict(test_input)
    assert_allclose(test_output, reference_output, atol=1e-05)

    # test with Conv2D
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Conv2D(5, (2, 2), padding='same'),
                                 input_shape=(2, 4, 4, 3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')
    model.train_on_batch(np.random.random((1, 2, 4, 4, 3)),
                         np.random.random((1, 2, 4, 4, 5)))

    model = model_from_json(model.to_json())
    model.summary()

    # test stacked layers
    model = Sequential()
    model.add(wrappers.TimeDistributed(layers.Dense(2), input_shape=(3, 4)))
    model.add(wrappers.TimeDistributed(layers.Dense(3)))
    model.add(layers.Activation('relu'))
    model.compile(optimizer='rmsprop', loss='mse')

    model.fit(np.random.random((10, 3, 4)),
              np.random.random((10, 3, 3)),
              epochs=1,
              batch_size=10)

    # test wrapping Sequential model
    model = Sequential()
    model.add(layers.Dense(3, input_dim=2))
    outer_model = Sequential()
    outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with functional API
    x = Input(shape=(3, 2))
    y = wrappers.TimeDistributed(model)(x)
    outer_model = Model(x, y)
    outer_model.compile(optimizer='rmsprop', loss='mse')
    outer_model.fit(np.random.random((10, 3, 2)),
                    np.random.random((10, 3, 3)),
                    epochs=1,
                    batch_size=10)

    # test with BatchNormalization
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.BatchNormalization(center=True,
                                                           scale=True),
                                 name='bn',
                                 input_shape=(10, 2)))
    model.compile(optimizer='rmsprop', loss='mse')
    # Assert that mean and variance are 0 and 1.
    td = model.layers[0]
    assert np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Train
    model.train_on_batch(np.random.normal(loc=2, scale=2, size=(1, 10, 2)),
                         np.broadcast_to(np.array([0, 1]), (1, 10, 2)))
    # Assert that mean and variance changed.
    assert not np.array_equal(td.get_weights()[2], np.array([0, 0]))
    assert not np.array_equal(td.get_weights()[3], np.array([1, 1]))
    # Verify input_map has one mapping from inputs to reshaped inputs.
    uid = object_list_uid(model.inputs)
    assert len(td._input_map.keys()) == 1
    assert uid in td._input_map
    assert K.int_shape(td._input_map[uid]) == (None, 2)
Code example #42
def train_model(train_file='data_classes_4_squats_adjusted.csv',
                job_dir='leeeeeroooooyyyyyjeeeeeenkins',
                **args):
    parameter_string = ('final_25_classes_4_squats_adjusted' +
                        '_dropout_' + str(dropout) +
                        '_timesteps_' + str(timesteps) +
                        '_timesteps_in_future_' + str(timesteps_in_future) +
                        '_nodes_per_layer_' + str(nodes_per_layer) +
                        '_filter_length_' + str(filter_length))
    if 'gs://' in job_dir:
        logs_path = 'gs://exermotemachinelearningengine' + '/logs/' + parameter_string
    else:
        logs_path = '.' + '/logs/' + parameter_string
    print('-----------------------')
    print('Using train_file located at {}'.format(train_file))
    print('Using logs_path located at {}'.format(logs_path))
    print('-----------------------')

    # load data
    file_stream = file_io.FileIO(train_file, mode='r')
    dataframe = read_csv(file_stream, header=0)
    dataframe.fillna(0, inplace=True)
    dataset = dataframe.values

    X = dataset[:, [
        2,
        3,
        4,
        5,
        6,
        7,
        8,
        9,
        10,
        11,
        12,
        13,  # Device: xGravity, yGravity, zGravity, xAcceleration, yAcceleration, zAcceleration, pitch, roll, yaw, xRotationRate, yRotationRate, zRotationRate
        # 14,15,16,17,                          # Right Hand: rssi, xAcceleration, yAcceleration, zAcceleration
        # 18,19,20,21,                          # Left Hand: rssi, xAcceleration, yAcceleration, zAcceleration
        # 22,23,24,25,                          # Right Foot: rssi, xAcceleration, yAcceleration, zAcceleration
        # 26,27,28,29,                          # Left Foot: rssi, xAcceleration, yAcceleration, zAcceleration
        # 30,31,32,33,                          # Chest: rssi, xAcceleration, yAcceleration, zAcceleration
        # 34,35,36,37                           # Belly: rssi, xAcceleration, yAcceleration, zAcceleration
    ]].astype(float)
    y = dataset[:, 0]  # ExerciseType (Index 1 is ExerciseSubType)

    # data parameters
    data_dim = X.shape[1]
    num_classes = len(set(y))

    # scale X
    scaler = MinMaxScaler(feature_range=(0, 1))
    X = scaler.fit_transform(X)  # X*scaler.scale_+scaler.min_ (columnwise)
    print('Multiplying each row in X elementwise by: {}'.format(scaler.scale_))
    print('Adding to each row in X elementwise: {}'.format(scaler.min_))

    # encode Y
    encoder = LabelEncoder()
    encoder.fit(y)
    encoded_y = encoder.transform(y)  # encoder.classes_
    print('One-hot encoding y: {}'.format(encoder.classes_))
    hot_encoded_y = np_utils.to_categorical(encoded_y)

    # prepare data for LSTM
    def create_LSTM_dataset(x, y, timesteps):
        dataX, dataY = [], []
        for i in range(len(x) - timesteps + 1):
            dataX.append(x[i:i + timesteps, :])
            dataY.append(y[i + timesteps - timesteps_in_future - 1, :])
        return array(dataX), array(dataY)
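    # worked example of the windowing above: with timesteps=3 and
    # timesteps_in_future=0, the first sample pairs x[0:3] with y[2];
    # with timesteps_in_future=1 it would pair x[0:3] with y[1] instead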

    X, hot_encoded_y = create_LSTM_dataset(X, hot_encoded_y, timesteps)

    # define model
    model = Sequential([
        Conv1D(nodes_per_layer,
               filter_length,
               strides=2,
               activation='relu',
               input_shape=(timesteps, data_dim),
               name='accelerations'),
        Conv1D(nodes_per_layer, filter_length, strides=1, activation='relu'),
        LSTM(nodes_per_layer, return_sequences=True),
        LSTM(nodes_per_layer, return_sequences=False),
        Dropout(dropout),
        Dense(num_classes),
        Activation('softmax', name='scores'),
    ])

    model.summary()

    # compile model
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # define callbacks
    callbacks = []

    tensor_board = TensorBoard(log_dir=logs_path,
                               histogram_freq=1,
                               write_graph=False,
                               write_images=False)
    callbacks.append(tensor_board)

    checkpoint_path = 'best_weights.h5'
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks.append(checkpoint)

    # train model
    model.fit(X,
              hot_encoded_y,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_split=validation_split,
              callbacks=callbacks)

    # load best checkpoint
    model.load_weights('best_weights.h5')

    # evaluate best model
    def non_shuffling_train_test_split(X, y, test_size=validation_split):
        i = int((1 - test_size) * X.shape[0]) + 1
        X_train, X_test = split(X, [i])
        y_train, y_test = split(y, [i])
        return X_train, X_test, y_train, y_test
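    # e.g. with 100 samples and test_size=0.3: i = int(0.7 * 100) + 1 = 71,
    # so rows 0..70 stay in train and rows 71..99 form the chronologically
    # last test block -- order is preserved, unlike a shuffled split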

    _, X_test, _, y_test = non_shuffling_train_test_split(
        X, hot_encoded_y, test_size=validation_split)

    scores = model.evaluate(X_test, y_test, verbose=0)
    acc = scores[1]

    # save model
    model_h5_name = 'model_acc_' + str(acc) + '.h5'
    model.save(model_h5_name)

    # save model.h5 on to google storage
    # h5 files are binary, so copy with binary modes
    with file_io.FileIO(model_h5_name, mode='rb') as input_f:
        with file_io.FileIO(logs_path + '/' + model_h5_name,
                            mode='wb+') as output_f:
            output_f.write(input_f.read())

    # reset session
    # Note: If this piece of code did help you to achieve your goal, please upvote my solution under:
    # https://stackoverflow.com/questions/41959318/deploying-keras-models-via-google-cloud-ml/44232441#44232441
    # Thank you so much :)
    k.clear_session()
    sess = tf.Session()
    k.set_session(sess)

    # disable loading of learning nodes
    k.set_learning_phase(0)

    # load model
    model = load_model(model_h5_name)
    config = model.get_config()
    weights = model.get_weights()
    new_Model = Sequential.from_config(config)
    new_Model.set_weights(weights)

    # export coreml model

    coreml_model = coremltools.converters.keras.convert(
        new_Model, input_names=['accelerations'], output_names=['scores'])
    model_mlmodel_name = 'model_acc_' + str(acc) + '.mlmodel'
    coreml_model.save(model_mlmodel_name)

    # save model.mlmodel on to google storage
    # .mlmodel files are binary as well
    with file_io.FileIO(model_mlmodel_name, mode='rb') as input_f:
        with file_io.FileIO(logs_path + '/' + model_mlmodel_name,
                            mode='wb+') as output_f:
            output_f.write(input_f.read())

    # export saved model
    # Note: If this piece of code did help you to achieve your goal, please upvote my solution under:
    # https://stackoverflow.com/questions/41959318/deploying-keras-models-via-google-cloud-ml/44232441#44232441
    # Thank you so much :)
    export_path = logs_path + "/export"
    builder = saved_model_builder.SavedModelBuilder(export_path)

    signature = predict_signature_def(
        inputs={'accelerations': new_Model.input},
        outputs={'scores': new_Model.output})

    with k.get_session() as sess:
        builder.add_meta_graph_and_variables(
            sess=sess,
            tags=[tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature
            })
        builder.save()
Code example #43
class RecurrentContainer(Layer):
    def __init__(self,
                 weights=None,
                 return_sequences=False,
                 go_backwards=False,
                 stateful=False,
                 input_length=None,
                 unroll=False):
        self.return_sequences = return_sequences
        self.initial_weights = weights
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.input_length = input_length
        self.unroll = unroll
        self.supports_masking = True
        self.model = Sequential()
        super(RecurrentContainer, self).__init__()

    def add(self, layer):
        '''Add a layer.
        # Arguments:
            layer: Layer instance -- an RNNCell or a normal layer such as Dense.
        '''
        self.model.add(layer)
        if len(self.model.layers) == 1:
            shape = layer.input_spec[0].shape
            shape = (shape[0], self.input_length) + shape[1:]
            self.batch_input_shape = shape
            self.input_spec = [InputSpec(shape=shape)]
        if self.stateful:
            self.reset_states()

    def pop(self):
        '''Remove the last layer.
        '''
        self.model.pop()
        if self.stateful:
            self.reset_states()

    @property
    def input_shape(self):
        return self.input_spec[0].shape

    @property
    def output_shape(self):
        input_length = self.input_spec[0].shape[1]
        shape = self.model.output_shape
        if self.return_sequences:
            return (shape[0], input_length) + shape[1:]
        else:
            return shape

    def get_output_shape_for(self, input_shape):
        return self.output_shape

    def step(self, x, states):
        states = list(states)
        state_index = 0
        nb_states = []
        for layer in self.model.layers:
            if _isRNN(layer):
                x, new_states = layer._step(
                    x, states[state_index:state_index + len(layer.states)])
                states[state_index:state_index +
                       len(layer.states)] = new_states
                state_index += len(layer.states)
            else:
                x = layer.call(x)
        return x, states

    def call(self, x, mask=None):
        input_shape = self.input_spec[0].shape
        if self.stateful:
            initial_states = self.states
        else:
            initial_states = self.get_initial_states(x)
        last_output, outputs, states = K.rnn(self.step,
                                             x,
                                             initial_states,
                                             go_backwards=self.go_backwards,
                                             mask=mask,
                                             unroll=self.unroll,
                                             input_length=input_shape[1])
        if self.stateful:
            self.updates = []
            for i in range(len(states)):
                self.updates.append((self.states[i], states[i]))
        if self.return_sequences:
            return outputs
        else:
            return last_output

    def get_initial_states(self, x):
        initial_states = []
        batch_size = self.input_spec[0].shape[0]
        input_length = self.input_spec[0].shape[1]
        if input_length is None:
            input_length = K.shape(x)[1]
        if batch_size is None:
            batch_size = K.shape(x)[0]
        input = self._get_first_timestep(x)
        for layer in self.model.layers:
            if _isRNN(layer):
                layer_initial_states = []
                for state in layer.states:
                    state = self._get_state_from_info(state, input, batch_size,
                                                      input_length)
                    if type(state) != list:
                        state = [state]
                    layer_initial_states += state
                initial_states += layer_initial_states
                input = layer._step(input, layer_initial_states)[0]
            else:
                input = layer.call(input)
        return initial_states

    def reset_states(self):
        batch_size = self.input_spec[0].shape[0]
        input_length = self.input_spec[0].shape[1]
        states = []
        for layer in self.model.layers:
            if _isRNN(layer):
                for state in layer.states:
                    assert type(state) in [tuple, list] or 'numpy' in str(
                        type(state)
                    ), 'Stateful RNNs require states with static shapes'
                    if 'numpy' in str(type(state)):
                        states += [K.variable(state)]
                    else:
                        state = list(state)
                        for i in range(len(state)):
                            if state[i] in [-1, 'batch_size']:
                                assert type(
                                    batch_size
                                ) == int, 'Stateful RNNs require states with static shapes'
                                state[i] = batch_size
                            elif state[i] == 'input_length':
                                assert type(
                                    input_length
                                ) == int, 'Stateful RNNs require states with static shapes'
                                state[i] = input_length
                        states += [K.variable(np.zeros(state))]
        self.states = states

    def _get_state_from_info(self, info, input, batch_size, input_length):
        if hasattr(info, '__call__'):
            return info(input)
        elif type(info) is tuple:
            info = list(info)
            for i in range(len(info)):
                if info[i] in [-1, 'batch_size']:
                    info[i] = batch_size
                elif info[i] == 'input_length':
                    info[i] = input_length
            if K._BACKEND == 'theano':
                from theano import tensor as k
            else:
                import tensorflow as k
            return k.zeros(info)
        elif 'numpy' in str(type(info)):
            return K.variable(info)
        else:
            return info

    def _get_first_timestep(self, x):
        slices = [slice(None)] * K.ndim(x)
        slices[1] = 0
        return x[slices]
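        # e.g. for x of shape (samples, timesteps, features) the slices
        # built above evaluate to x[:, 0], every sample's first timestep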

    @property
    def trainable_weights(self):
        return self.model.trainable_weights

    @trainable_weights.setter
    def trainable_weights(self, value):
        pass

    @property
    def non_trainable_weights(self):
        return self.model.non_trainable_weights

    @non_trainable_weights.setter
    def non_trainable_weights(self, value):
        pass

    @property
    def weights(self):
        return self.model.weights

    @property
    def regularizers(self):
        return self.model.regularizers

    @regularizers.setter
    def regularizers(self, value):
        pass

    def get_config(self):

        attribs = [
            'return_sequences', 'go_backwards', 'stateful', 'input_length',
            'unroll'
        ]
        config = {x: getattr(self, x) for x in attribs}
        config['model'] = self.model.get_config()
        base_config = super(RecurrentContainer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        model_config = config['model']
        del config['model']
        rc = cls(**config)
        rc.model = Sequential.from_config(model_config)
        return rc
Code example #44
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(num_classes))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_split=0.1)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(num_classes))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert (loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Code example #45
File: werw.py Project: yzjbryant/YZJ_MIX_Code
model2.add(Dense(512))
model2.add(Activation('relu'))
model2.add(Dropout(0.5))
model2.add(Dense(num_classes))
model2.add(Activation('softmax'))

## Recurrent neural network (RNN)
from keras.layers import Embedding, LSTM
model3.add(Embedding(20000, 128))
model3.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
model3.add(Dense(1, activation='sigmoid'))

## Inspect the model
model.output_shape   # model output shape
model.summary()      # display a model summary
model.get_config()   # model configuration
model.get_weights()  # list all of the model's weight tensors


## Compile the model
# Multilayer perceptron: binary classification
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Multilayer perceptron: multi-class classification
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Multilayer perceptron: regression
model.compile(optimizer='rmsprop', loss='mse', metrics=['mae'])

# Recurrent neural network
model3.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
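# A minimal end-to-end sketch of the binary-classification recipe above,
# using placeholder layer sizes and random data; only Sequential and Dense
# are assumed from the snippets in this example.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense

mlp = Sequential()
mlp.add(Dense(32, activation='relu', input_dim=100))
mlp.add(Dense(1, activation='sigmoid'))
mlp.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
mlp.fit(np.random.rand(64, 100), np.random.randint(2, size=(64, 1)),
        epochs=1, batch_size=16, verbose=0)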
Code example #46
File: NNFactory.py Project: nchos88/kaggle-mercedes
def get_deep_bidirectional(num_of_features,
                           reg1=0.01,
                           reg2=0.01,
                           neurons_conv=80,
                           neurons=19,
                           neurons2=20,
                           noise=0.3,
                           dropout=0.15,
                           lr=1.05,
                           rho=0.96):
    model = Sequential()
    model.add(InputLayer(input_shape=(num_of_features, )))

    # reshape into a 3D tensor (samples, 1, num_of_features)
    model.add(Reshape((1, num_of_features)))

    model.add(
        Conv1D(neurons_conv,
               1,
               activation="relu",
               input_shape=(1, num_of_features),
               padding="same",
               strides=1))

    model.add(GaussianNoise(noise))
    # keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=1e-08, decay=0.0) 58.77% (+/- 3.81%)
    # keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) 57.32% (+/- 3.70%)
    # keras.optimizers.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) 57.91% (+/- 3.34%)
    # keras.optimizers.Nadam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, schedule_decay=0.004) 57.83% (+/- 3.99%)

    optimizer = Adadelta(lr=lr, rho=rho, epsilon=1e-08, decay=0.0)
    model.add(
        Bidirectional(LSTM(neurons,
                           stateful=False,
                           activation="tanh",
                           consume_less='gpu',
                           unroll=True,
                           recurrent_regularizer=regularizers.l1_l2(
                               reg1, reg2),
                           return_sequences=True,
                           go_backwards=True),
                      batch_input_shape=(None, 1, num_of_features)))

    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='tanh',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='tanh',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='linear',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))
    model.add(Dropout(dropout))
    model.add(
        Bidirectional(
            LSTM(neurons2,
                 stateful=False,
                 activation='relu',
                 consume_less='gpu',
                 unroll=True,
                 batch_input_shape=(None, 1, neurons),
                 recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
                 return_sequences=True,
                 go_backwards=True)))

    model.add(
        LSTM(1,
             stateful=False,
             activation='linear',
             consume_less='gpu',
             recurrent_regularizer=regularizers.l1_l2(reg1, reg2),
             unroll=True,
             batch_input_shape=(None, 1, 18),
             go_backwards=True))
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=[r2_keras])
    print(model.get_config())
    print("Trained model: bidirectional")
    return model
Code example #47
def loadCNN(wf_index):
    global get_output
    model = Sequential()

    model.add(Conv2D(nb_filters, (nb_conv, nb_conv),
                     padding='valid',
                     input_shape=(img_channels, img_rows, img_cols)))
    convout1 = Activation('relu')
    model.add(convout1)
    model.add(Conv2D(nb_filters, (nb_conv, nb_conv)))
    convout2 = Activation('relu')
    model.add(convout2)
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    '''

    model.add(ZeroPadding2D((1,1),input_shape=(img_channels, img_rows, img_cols)))
    model.add(Conv2D(nb_filters , (nb_conv, nb_conv), activation='relu'))
    #model.add(ZeroPadding2D((1,1)))
    #model.add(Conv2D(nb_filters , (nb_conv, nb_conv), activation='relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    model.add(Dropout(0.2))

    #model.add(ZeroPadding2D((1,1)))
    model.add(Conv2D(nb_filters , (nb_conv, nb_conv), activation='relu'))
    #model.add(ZeroPadding2D((1,1)))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    ##
    #model.add(Conv2D(nb_filters , (nb_conv, nb_conv), activation='relu'))
    #model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool), strides=(2,2)))

    model.add(Dropout(0.3))
    model.add(Flatten())
    ###
    #model.add(Dense(128))
    #model.add(Activation('relu'))
    #model.add(Dropout(0.5))

    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    '''

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])

    # Model summary
    model.summary()
    # Model config details
    model.get_config()

    # from keras.utils import plot_model
    # plot_model(model, to_file='new_model.png', show_shapes = True)

    if wf_index >= 0:
        # Load pretrained weights
        fname = WeightFileName[int(wf_index)]
        print("loading ", fname)
        model.load_weights(fname)

    layer = model.layers[11]
    get_output = K.function([model.layers[0].input, K.learning_phase()], [layer.output, ])
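    # usage sketch: calling the backend function with learning phase 0
    # (test mode) returns the activations of layer 11 for a batch:
    #   features = get_output([X_batch, 0])[0]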

    return model
Code example #48
File: engine.py Project: themrmax/recurrentshop
class RecurrentContainer(Layer):
    def __init__(self,
                 weights=None,
                 return_sequences=False,
                 return_states=False,
                 go_backwards=False,
                 stateful=False,
                 readout=False,
                 state_sync=False,
                 decode=False,
                 output_length=None,
                 input_length=None,
                 unroll=False,
                 **kwargs):
        self.return_sequences = return_sequences or decode
        self.return_states = return_states
        self.initial_weights = weights
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.readout = readout
        self.state_sync = state_sync
        self.decode = decode
        if decode:
            assert output_length, 'Missing argument: output_length should be specified for decoders.'
        self.output_length = output_length
        self.input_length = input_length
        self.unroll = unroll
        if unroll and not decode:
            assert input_length, 'Missing argument: input_length should be specified for unrolling.'
        self.supports_masking = True
        self.model = Sequential()
        self._truth_tensor = None
        self.initial_readout = None
        super(RecurrentContainer, self).__init__(**kwargs)

    def add(self, layer):
        '''Add a layer.
        # Arguments:
            layer: Layer instance -- an RNNCell or a normal layer such as Dense.
        '''
        self.model.add(layer)
        self.uses_learning_phase = (self._truth_tensor is not None or any(
            [l.uses_learning_phase for l in self.model.layers]))
        if len(self.model.layers) == 1:
            if layer.input_spec is not None:
                shape = layer.input_spec[0].shape
            else:
                shape = layer.input_shape
            if not self.decode:
                shape = (shape[0], self.input_length) + shape[1:]
            self.batch_input_shape = shape
            self.input_spec = [InputSpec(shape=shape)]
        if _isRNN(layer) and self.state_sync:
            if not hasattr(self, 'nb_states'):
                self.nb_states = len(layer.states)
            else:
                assert len(
                    layer.states
                ) == self.nb_states, 'Incompatible layer. In a state synchronized recurrent container, all the cells should have the same number of states.'
        if self.stateful:
            self.reset_states()

    def pop(self):
        '''Remove the last layer.
        '''
        self.model.pop()
        if self.stateful:
            self.reset_states()

    @property
    def input_shape(self):
        return self.input_spec[0].shape

    @property
    def output_shape(self):
        shape = self.model.output_shape
        if self.decode:
            shape = (shape[0], self.output_length) + shape[1:]
        elif self.return_sequences:
            input_length = self.input_spec[0].shape[1]
            shape = (shape[0], input_length) + shape[1:]
        if self.return_states:
            shape = [shape] + [None] * self.nb_states
        return shape

    def get_output_shape_for(self, input_shape):
        if self.return_states:
            output_shape = self.output_shape
            state_shapes = output_shape[1:]
            output_shape = output_shape[0]
            output_shape = (input_shape[0], ) + output_shape[1:]
            return [output_shape] + state_shapes
        else:
            return (input_shape[0], ) + self.output_shape[1:]

    def step(self, x, states):
        states = list(states)
        state_index = 0
        if self.decode:
            x = states[0]
            _x = x
            states = states[1:]
        for i in range(len(self.model.layers)):
            layer = self.model.layers[i]
            if self.readout and (
                (i == 0 and self.readout != 'call') or
                (self.readout == 'call' and hasattr(layer, 'receive_readout')
                 and layer.receive_readout)):
                readout = states[-1]
                if self._truth_tensor is not None:
                    slices = [
                        slice(None),
                        states[-2][0] - K.switch(states[-2][0], 1, 0)
                    ] + [slice(None)] * (K.ndim(self._truth_tensor) - 2)
                    readout = K.in_train_phase(
                        K.switch(states[-2][0], self._truth_tensor[slices],
                                 readout), readout)
                if self.readout in ['add', True]:
                    x += readout
                elif self.readout == 'mul':
                    x *= readout
                elif self.readout == 'pack':
                    x = K.pack([x, readout])
                elif self.readout == 'readout_only':
                    x = readout
                elif self.readout == 'call':
                    x = [x, readout]
            if _isRNN(layer):
                if self.state_sync:
                    x, new_states = layer._step(x, states[:len(layer.states)])
                    states[:len(layer.states)] = new_states
                else:
                    x, new_states = layer._step(
                        x, states[state_index:state_index + len(layer.states)])
                    states[state_index:state_index +
                           len(layer.states)] = new_states
                    state_index += len(layer.states)
            else:
                x = layer.call(x)
        if self.decode:
            states = [_x] + states
        if self.readout:
            if self._truth_tensor is not None:
                states[-2] += 1
            states[-1] = x
        return x, states

    def call(self, x, mask=None):
        if type(x) in [list, tuple]:
            if 'ground_truth' in self.input_format:
                self.set_truth_tensor(
                    x[self.input_format.index('ground_truth')])
            if 'initial_readout' in self.input_format:
                self.initial_readout = x[self.input_format.index(
                    'initial_readout')]
            if 'states' in self.input_format:
                states = x[self.input_format.index('states'):]
                for i in range(len(states)):
                    self.set_state(self.state_indices[i], states[i])
            x = x[0]
        if self.initial_readout is not None and self.readout == 'readout_only':
            self.initial_readout = x
        unroll = self.unroll
        '''
		if K.backend() == 'tensorflow':
			cell_types = set([type(layer) for layer in self.model.layers if _isRNN(layer)])
			if len(cell_types) > 1:
				unroll = True
		'''
        input_shape = self.input_spec[0].shape
        if self.stateful:
            initial_states = self.states
        else:
            initial_states = self.get_initial_states(x)
        if self.decode:
            initial_states = [x] + initial_states
            if self.uses_learning_phase:
                with learning_phase(0):
                    last_output_0, outputs_0, states_0, updates = rnn(
                        self.step,
                        K.zeros((1, self.output_length, 1)),
                        initial_states,
                        unroll=unroll,
                        input_length=self.output_length)
                with learning_phase(1):
                    last_output_1, outputs_1, states_1, updates = rnn(
                        self.step,
                        K.zeros((1, self.output_length, 1)),
                        initial_states,
                        unroll=unroll,
                        input_length=self.output_length)
                outputs = K.in_train_phase(outputs_1, outputs_0)
                last_output = _get_last_timestep(outputs)
                states = [
                    K.in_train_phase(states_1[i], states_0[i])
                    for i in range(len(states_0))
                ]
            else:
                last_output, outputs, states, updates = rnn(
                    self.step,
                    K.zeros((1, self.output_length, 1)),
                    initial_states,
                    unroll=unroll,
                    input_length=self.output_length)
        else:
            if self.uses_learning_phase:
                with learning_phase(0):
                    last_output_0, outputs_0, states_0, updates = rnn(
                        self.step,
                        x,
                        initial_states,
                        go_backwards=self.go_backwards,
                        mask=mask,
                        unroll=unroll,
                        input_length=input_shape[1])
                with learning_phase(1):
                    last_output_1, outputs_1, states_1, updates = rnn(
                        self.step,
                        x,
                        initial_states,
                        go_backwards=self.go_backwards,
                        mask=mask,
                        unroll=unroll,
                        input_length=input_shape[1])
                outputs = K.in_train_phase(outputs_1, outputs_0)
                last_output = _get_last_timestep(outputs)
                states = [
                    K.in_train_phase(states_1[i], states_0[i])
                    for i in range(len(states_0))
                ]
            else:
                last_output, outputs, states, updates = rnn(
                    self.step,
                    x,
                    initial_states,
                    go_backwards=self.go_backwards,
                    mask=mask,
                    unroll=unroll,
                    input_length=input_shape[1])
        #self.add_update(updates, x)
        states = list(states)
        if self.stateful:
            for i in range(len(states)):
                if type(self.states[i]) == type(K.zeros((1, ))):
                    updates.append((self.states[i], states[i]))
            self.add_update(updates, x)
        if self.decode:
            states.pop(0)
        if self.readout:
            states.pop(-1)
            if self._truth_tensor is not None:
                states.pop(-1)
        if self.return_sequences:
            y = outputs
        else:
            y = last_output
        if self.return_states:
            y = [y] + states
        return y

    def get_initial_states(self, x):
        initial_states = []
        batch_size = self.input_spec[0].shape[0]
        input_length = self.input_spec[0].shape[1]
        if input_length is None:
            input_length = K.shape(x)[1]
        if batch_size is None:
            batch_size = K.shape(x)[0]
        if self.decode:
            input = x
        else:
            input = _get_first_timestep(x)
        for layer in self.model.layers:
            if _isRNN(layer):
                layer_initial_states = []
                for state in layer.states:
                    state = self._get_state_from_info(state, input, batch_size,
                                                      input_length)
                    if type(state) != list:
                        state = [state]
                    layer_initial_states += state
                if not self.state_sync or initial_states == []:
                    initial_states += layer_initial_states
                input = layer._step(input, layer_initial_states)[0]
            else:
                input = layer.call(input)
        if self.readout:
            if self._truth_tensor is not None:
                initial_states += [K.zeros((1, ), dtype='int32')]
            if self.initial_readout is not None:
                initial_readout = self._get_state_from_info(
                    self.initial_readout, input, batch_size, input_length)
                initial_states += [initial_readout]
            else:
                initial_states += [K.zeros_like(input)]
        return initial_states

    def reset_states(self):
        batch_size = self.input_spec[0].shape[0]
        input_length = self.input_spec[0].shape[1]
        states = []
        for layer in self.model.layers:
            if _isRNN(layer):
                for state in layer.states:
                    #assert type(state) in [tuple, list] or 'numpy' in str(type(state)), 'Stateful RNNs require states with static shapes'
                    if 'numpy' in str(type(state)):
                        states += [K.variable(state)]
                    elif type(state) in [list, tuple]:
                        state = list(state)
                        for i in range(len(state)):
                            if state[i] in [-1, 'batch_size']:
                                assert type(
                                    batch_size
                                ) == int, 'Stateful RNNs require states with static shapes'
                                state[i] = batch_size
                            elif state[i] == 'input_length':
                                assert type(
                                    input_length
                                ) == int, 'Stateful RNNs require states with static shapes'
                                state[i] = input_length
                        states += [K.variable(np.zeros(state))]
                    else:
                        states += [state]
                if self.state_sync:
                    break
        if self.readout:
            shape = list(self.model.output_shape)
            shape.pop(1)
            if self._truth_tensor is not None:
                states += [K.zeros((1, ), dtype='int32')]
            states += [K.zeros(shape)]
        self.states = states

    def _get_state_from_info(self, info, input, batch_size, input_length):
        if hasattr(info, '__call__'):
            return info(input)
        elif type(info) in [list, tuple]:
            info = list(info)
            for i in range(len(info)):
                if info[i] in [-1, 'batch_size']:
                    info[i] = batch_size
                elif info[i] == 'input_length':
                    info[i] = input_length
            if K._BACKEND == 'theano':
                from theano import tensor as k
            else:
                import tensorflow as k
            return k.zeros(info)
        elif 'numpy' in str(type(info)):
            return K.variable(info)
        else:
            return info

    def compute_mask(self, input, input_mask=None):
        mask = input_mask[0] if type(input_mask) is list else input_mask
        mask = mask if self.return_sequences else None
        mask = [mask] + [None] * self.nb_states if self.return_states else mask
        return mask

    @property
    def trainable_weights(self):
        if not self.model.layers:
            return []
        return self.model.trainable_weights

    @trainable_weights.setter
    def trainable_weights(self, value):
        pass

    @property
    def non_trainable_weights(self):
        if not self.model.layers:
            return []
        return self.model.non_trainable_weights

    @non_trainable_weights.setter
    def non_trainable_weights(self, value):
        pass

    @property
    def weights(self):
        return self.model.weights

    def set_truth_tensor(self, val):
        if val is not None:
            self.uses_learning_phase = True
        self._truth_tensor = val

    def get_config(self):
        attribs = [
            'return_sequences', 'return_states', 'go_backwards', 'stateful',
            'readout', 'state_sync', 'decode', 'input_length', 'unroll',
            'output_length'
        ]
        config = {x: getattr(self, x) for x in attribs}
        config['model'] = self.model.get_config()
        base_config = super(RecurrentContainer, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        model_config = config['model']
        del config['model']
        rc = cls(**config)
        from . import cells
        rc.model = Sequential()
        for layer_config in model_config:
            if 'config' in layer_config and 'name' in layer_config['config']:
                del layer_config['config']['name']
            layer = layer_from_config(layer_config, cells.__dict__)
            rc.add(layer)
        return rc

    def __call__(self, x, mask=None):
        args = ['input', 'ground_truth', 'initial_readout', 'states']
        if type(x) is dict:
            x = list(map(x.get, args))
        elif type(x) not in [list, tuple]:
            x = [x, None, None, None]
        self.input_format = []
        input_tensors = []
        for i in range(3):
            if x[i] is not None:
                self.input_format += [args[i]]
                input_tensors += [x[i]]
        if x[3] is not None:
            self.input_format += [args[3]]
            states = []
            self.state_indices = []
            for i in range(len(x[3])):
                if x[3][i] is not None:
                    states += [x[3][i]]
                    self.state_indices += [i]
            input_tensors += states

        if not self.built:
            self.assert_input_compatibility(x)
            input_shapes = []
            for x_elem in input_tensors:
                if hasattr(x_elem, '_keras_shape'):
                    input_shapes.append(x_elem._keras_shape)
                elif hasattr(K, 'int_shape'):
                    input_shapes.append(K.int_shape(x_elem))
                elif x_elem is not None:
                    raise Exception('You tried to call layer "' + self.name +
                                    '". This layer has no information'
                                    ' about its expected input shape, '
                                    'and thus cannot be built. '
                                    'You can build it manually via: '
                                    '`layer.build(batch_input_shape)`')
            self.build(input_shapes[0])
            self.built = True
        self.assert_input_compatibility(x[0])
        input_added = False
        inbound_layers = []
        node_indices = []
        tensor_indices = []
        self.ignore_indices = []
        for i in range(len(input_tensors)):
            input_tensor = input_tensors[i]
            if hasattr(input_tensor,
                       '_keras_history') and input_tensor._keras_history:
                previous_layer, node_index, tensor_index = input_tensor._keras_history
                inbound_layers.append(previous_layer)
                node_indices.append(node_index)
                tensor_indices.append(tensor_index)
            else:
                inbound_layers = None
                break
        if inbound_layers:
            self.add_inbound_node(inbound_layers, node_indices, tensor_indices)
            input_added = True
        if input_added:
            outputs = self.inbound_nodes[-1].output_tensors
            if len(outputs) == 1:
                return outputs[0]
            else:
                return outputs
        else:
            return self.call(x, mask)

    def set_state(self, index, state):
        n = 0
        for layer in self.model.layers:
            if _isRNN(layer):
                if self.state_sync:
                    layer.states[index] = state
                    return
                n += len(layer.states)
                if index < n:
                    layer.states[index + len(layer.states) - n] = state
                    return

    @property
    def nb_states(self):
        if self.state_sync:
            for layer in self.model.layers:
                if _isRNN(layer):
                    return len(layer.states)
        return 0
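Driving the container: a minimal usage sketch, assuming the recurrentshop-style API this class implements (`RecurrentContainer` and `LSTMCell` are that project's names; only `add()` and the constructor flags are visible above):

from keras.layers import Input, Dense
from keras.models import Model
from recurrentshop import RecurrentContainer, LSTMCell

rc = RecurrentContainer(return_sequences=True)
rc.add(LSTMCell(32, input_dim=16))  # an RNN cell; its states are managed by the container
rc.add(Dense(16))                   # non-RNN layers are applied at every timestep

x = Input((10, 16))                 # (timesteps, input_dim)
model = Model(x, rc(x))
model.compile('rmsprop', 'mse')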
コード例 #49
0
ファイル: test2.py プロジェクト: rushic24/T-Rex-game-AI
    def pre_execute():
        PATH = 'F:\\SupervisedChromeTrex\\keras practise'
        # Define data path
        data_path = PATH + '\\data\\input_data'
        data_dir_list = os.listdir(data_path)
        img_rows = 128
        img_cols = 128
        num_channel = 1
        num_epoch = 8

        # Define the number of classes
        num_classes = 2

        img_data_list = []

        for dataset in data_dir_list:
            img_list = os.listdir(data_path + '\\' + dataset)
            print('Loaded the images of dataset-' + '{}\n'.format(dataset))
            for img in img_list:
                input_img = cv2.imread(data_path + '\\' + dataset + '\\' + img, 0)
                input_img_resize = cv2.resize(input_img, (128, 128))
                cv2.imwrite(PATH + '\\data\\input_data_resized' + '\\' + dataset + '\\' + img, input_img_resize)
                img_data_list.append(input_img_resize)

        img_data = np.array(img_data_list)
        img_data = img_data.astype('float32')
        img_data /= 255
        print(img_data.shape)

        if num_channel == 1:
            if K.image_dim_ordering() == 'th':
                img_data = np.expand_dims(img_data, axis=1)
                print(img_data.shape)
            else:
                img_data = np.expand_dims(img_data, axis=-1)  # channels-last: (N, H, W) -> (N, H, W, 1)
                print(img_data.shape)
        else:
            if K.image_dim_ordering() == 'th':
                img_data = np.rollaxis(img_data, 3, 1)
                print(img_data.shape)

        # %%
        USE_SKLEARN_PREPROCESSING = False

        if USE_SKLEARN_PREPROCESSING:
            # using sklearn for preprocessing
            from sklearn import preprocessing


            def image_to_feature_vector(image, size=(128, 128)):
                # resize the image to a fixed size, then flatten the image into
                # a list of raw pixel intensities
                return cv2.resize(image, size).flatten()


            img_data_list = []
            for dataset in data_dir_list:
                img_list = os.listdir(data_path + '/' + dataset)
                print('Loaded the images of dataset-' + '{}\n'.format(dataset))
                for img in img_list:
                    input_img = cv2.imread(data_path + '/' + dataset + '/' + img)
                    input_img = cv2.cvtColor(input_img, cv2.COLOR_BGR2GRAY)
                    input_img_flatten = image_to_feature_vector(input_img, (128, 128))
                    img_data_list.append(input_img_flatten)

            img_data = np.array(img_data_list)
            img_data = img_data.astype('float32')
            print(img_data.shape)
            img_data_scaled = preprocessing.scale(img_data)
            print(img_data_scaled.shape)

            print(np.mean(img_data_scaled))
            print(np.std(img_data_scaled))

            print(img_data_scaled.mean(axis=0))
            print(img_data_scaled.std(axis=0))

            if K.image_dim_ordering() == 'th':
                img_data_scaled = img_data_scaled.reshape(img_data.shape[0], num_channel, img_rows, img_cols)
                print(img_data_scaled.shape)

            else:
                img_data_scaled = img_data_scaled.reshape(img_data.shape[0], img_rows, img_cols, num_channel)
                print(img_data_scaled.shape)

        if USE_SKLEARN_PREPROCESSING:
            img_data = img_data_scaled
        # %%
        # Assigning Labels

        # Define the number of classes
        num_classes = 2

        num_of_samples = img_data.shape[0]
        labels = np.ones((num_of_samples,), dtype='int64')

        labels[0:117] = 0
        labels[117:] = 1

        names = ['cats', 'dogs']

        # convert class labels to one-hot encoding
        Y = np_utils.to_categorical(labels, num_classes)

        # Shuffle the dataset
        x, y = shuffle(img_data, Y, random_state=2)
        # Split the dataset
        X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=2)

        # %%
        # Defining the model
        input_shape = img_data[0].shape

        model = Sequential()

        model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=input_shape))
        model.add(Activation('relu'))
        model.add(Convolution2D(32, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))

        model.add(Convolution2D(64, 3, 3))
        model.add(Activation('relu'))
        # model.add(Convolution2D(64, 3, 3))
        # model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(64))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(num_classes))
        model.add(Activation('softmax'))

        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # model.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=["accuracy"])
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=["accuracy"])

        # Viewing model_configuration

        model.summary()
        model.get_config()
        model.layers[0].get_config()
        model.layers[0].input_shape
        model.layers[0].output_shape
        model.layers[0].get_weights()
        np.shape(model.layers[0].get_weights()[0])
        model.layers[0].trainable

        # %%
        # Training
        hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test))

        # hist = model.fit(X_train, y_train, batch_size=32, nb_epoch=20,verbose=1, validation_split=0.2)

        # Training with callbacks
        from keras import callbacks

        filename = 'model_train_new.csv'
        csv_log = callbacks.CSVLogger(filename, separator=',', append=False)

        early_stopping = callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=0, verbose=0, mode='min')

        filepath = "Best-weights-my_model-{epoch:03d}-{loss:.4f}-{acc:.4f}.hdf5"

        checkpoint = callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')

        callbacks_list = [csv_log, early_stopping, checkpoint]

        hist = model.fit(X_train, y_train, batch_size=16, nb_epoch=num_epoch, verbose=1, validation_data=(X_test, y_test),
                         callbacks=callbacks_list)
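The backslash-concatenated paths above are Windows-only; a portable sketch of the same directory walk (PATH and the folder layout are the assumptions from pre_execute):

import os

data_path = os.path.join(PATH, 'data', 'input_data')
for dataset in os.listdir(data_path):
    for img in os.listdir(os.path.join(data_path, dataset)):
        img_path = os.path.join(data_path, dataset, img)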
コード例 #50
0
# Imports assumed by this snippet:
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM

model = Sequential()
model.add(LSTM(4, input_dim=look_back))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)

trainPredict = model.predict(trainX)
testPredict = model.predict(testX)

plt.clf()
plt.plot(trainPredict)
plt.plot(testPredict)

model.summary()

model.get_config()

model.get_weights()

model.to_json()
model.to_yaml()

model_lstm = Sequential()
model_lstm.add(LSTM(4, input_dim=2, init='zero', inner_init='zero'))
model_lstm.summary()
model_lstm.get_config()

for w in model_lstm.get_weights():
    print(np.shape(w))
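The trainX/trainY pair above is assumed to come from the usual sliding-window preprocessing for this kind of univariate LSTM forecaster; a minimal sketch (create_dataset and look_back are assumptions, not part of the snippet):

def create_dataset(dataset, look_back=1):
    # dataset: 2-D array of shape (n, 1)
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back - 1):
        dataX.append(dataset[i:i + look_back, 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)

# trainX is then typically reshaped to (samples, 1, look_back) before fitting.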
コード例 #51
0
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=1,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=2,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=2,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=1,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss < 0.8)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim, )))
    inner.add(Activation('relu'))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
コード例 #52
0
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size:(i + 1) * batch_size],
                       y_train[i * batch_size:(i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size:(i + 1) * batch_size],
                       y_test[i * batch_size:(i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=1,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=2,
              validation_data=(X_test, y_test))
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=2,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=1,
              validation_split=0.1)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0)
    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=1,
              shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    gen_loss = model.evaluate_generator(data_generator(True), 256, verbose=0)
    assert (gen_loss < 0.8)

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss < 0.8)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert (loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
コード例 #53
0
# Imports assumed by this snippet:
import numpy
from keras.models import Sequential
from keras.layers import Dense

# Split into test_x and test_y variables
test_set = numpy.loadtxt("train_1.csv", delimiter=",")
x_test = test_set[2000:5000, 0:6]
y_test = test_set[2000:5000, 6]

# X and Y are used below but were never defined in the original snippet;
# they are assumed to be the full feature/label arrays from the same file.
X = test_set[:, 0:6]
Y = test_set[:, 6]
print(X)

# create model
model = Sequential()
model.add(Dense(21, activation='relu', input_dim=6))
model.add(Dense(63, activation='relu'))
model.add(Dense(1))

#inspect model
print(model.output_shape)
print(model.summary())
print(model.get_config())

# Compile model
# alpha 0.001
model.compile(optimizer='rmsprop', loss='mse', metrics=['accuracy'])

# Fit the model
model.fit(X[0:7805, :],
          Y[0:7805],
          batch_size=32,
          epochs=1000,
          verbose=1,
          validation_data=(X[7805:11150, :], Y[7805:11150]))

# Evaluate on the test set
scores = model.evaluate(x_test, y_test, batch_size=32)
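`scores` pairs element-wise with `model.metrics_names`; a small usage note:

print('%s: %.4f' % (model.metrics_names[0], scores[0]))  # mse loss
print('%s: %.4f' % (model.metrics_names[1], scores[1]))  # accuracy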
コード例 #54
0
def test_siamese_2():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim, )))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim, )))
    right.add(Activation('relu'))

    add_shared_layer(Dense(nb_hidden), [left, right])

    left.add(Dense(nb_hidden))
    right.add(Dense(nb_hidden))

    add_shared_layer(Dense(nb_hidden), [left, right])

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=0,
              validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=0,
              validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=True,
              verbose=0,
              validation_split=0.1)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=0,
              validation_split=0.1)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0)
    model.fit([X_train, X_train],
              y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              verbose=0,
              shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert (loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_2.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim, )))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim, )))
    right.add(Activation('relu'))

    add_shared_layer(Dense(nb_hidden), [left, right])

    left.add(Dense(nb_hidden))
    right.add(Dense(nb_hidden))

    add_shared_layer(Dense(nb_hidden), [left, right])

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))

    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert (loss == nloss)
コード例 #55
0
ファイル: bilstm.py プロジェクト: XiaoliLi6/Seminar2020
end = datetime.datetime.now()
elapsed = end - start
print('Elapsed time: {}'.format(elapsed))

# Save the model

# model.save(r"C:\Users\Administrator\Desktop\研一下课程\bilstm\cnn_lstm_cnnlstm_textcnn_bilstm\lstm.h5")
#
# print("#---------------------------------------------------#")
# print("Saving the model")
# print("#---------------------------------------------------#")
# print("\n")
#
# Model summary
# Prints an overview of the model; internally this calls keras.utils.print_summary
print("#---------------------------------------------------#")
print("Printing model summary")
print(model.summary())
print("#---------------------------------------------------#")
print("\n")

# Model configuration
# Returns a Python dict containing the model's configuration
config = model.get_config()

print("#---------------------------------------------------#")
print("Printing model configuration")
print(config)
print("#---------------------------------------------------#")
print("\n")
コード例 #56
0
    return classifier


# kc and cvs are assumed to be the usual aliases:
# from keras.wrappers.scikit_learn import KerasClassifier as kc
# from sklearn.model_selection import cross_val_score as cvs
classifier = kc(build_fn=build_classifier, batch_size=10, epochs=100)
accuracies = cvs(estimator=classifier, X=xTrain, y=yTrain, cv=10, n_jobs=-1)
mean = accuracies.mean()
variance = accuracies.std()
'''Below is an attempt to export the ANN as TF Serving'''
#Exporting model with TensorFlow Serving
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
sess = tf.Session()
K.set_session(sess)
K.set_learning_phase(0)  #all new operations will be in test mode from now on
#serialize the model and get its weights, for quick re-building
config = classifier.get_config()
weights = classifier.get_weights()
#re-build a model where the learning phase is now hard-coded to 0
new_model = Model.from_config(config)  #removed Sequential.from_config(config)
new_model.set_weights(weights)
#Exporting as TFS
export_path = 'D:\Programming Tutorials\Machine Learning\Machine Learning AZ - Hadllin De Ponteves\Part 8 - Deep Learning\Section 39 - Artificial Neural Networks (ANN)'  # where to save the exported graph
export_version = 1  # version number (integer)
saver = tf.train.Saver(sharded=True)
# `exporter` is assumed to be the legacy session-bundle exporter:
from tensorflow.contrib.session_bundle import exporter
model_exporter = exporter.Exporter(saver)
# predict_signature_def expects `inputs`/`outputs` dicts rather than the
# keyword arguments used in the original snippet.
signature = predict_signature_def(inputs={'inputs': classifier.input},
                                  outputs={'scores': classifier.output})
model_exporter.init(sess.graph.as_graph_def(),
                    default_graph_signature=signature)
model_exporter.export(export_path, tf.constant(export_version), sess)
コード例 #57
0
ファイル: test_autoencoder.py プロジェクト: CVML/keras
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)[:max_train_samples]
Y_test = np_utils.to_categorical(y_test, nb_classes)[:max_test_samples]

print("X_train: ", X_train.shape)
print("X_test: ", X_test.shape)

##########################
# dense model test       #
##########################

print("Training classical fully connected layer for classification")
model_classical = Sequential()
model_classical.add(Dense(input_dim, 10, activation=activation))
model_classical.add(Activation('softmax'))
model_classical.get_config(verbose=1)
model_classical.compile(loss='categorical_crossentropy', optimizer='adam')
model_classical.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(X_test, Y_test))
classical_score = model_classical.evaluate(X_test, Y_test, verbose=0, show_accuracy=True)
print('\nclassical_score:', classical_score)

##########################
# autoencoder model test #
##########################

def build_lstm_autoencoder(autoencoder, X_train, X_test):
    X_train = X_train[:, np.newaxis, :]
    X_test = X_test[:, np.newaxis, :]
    print("Modified X_train: ", X_train.shape)
    print("Modified X_test: ", X_test.shape)
    def buildModel(self):
        params = self.params

        if self.params['charEmbeddings'] not in [
                None, "None", "none", False, "False", "false"
        ]:
            self.padCharacters()

        embeddings = self.embeddings
        casing2Idx = self.dataset['mappings']['casing']

        caseMatrix = np.identity(len(casing2Idx), dtype='float32')

        tokens = Sequential()
        tokens.add(
            Embedding(input_dim=embeddings.shape[0],
                      output_dim=embeddings.shape[1],
                      weights=[embeddings],
                      trainable=True,
                      name='token_emd'))

        casing = Sequential()
        # casing.add(Embedding(input_dim=len(casing2Idx), output_dim=self.addFeatureDimensions, trainable=True))
        casing.add(
            Embedding(input_dim=caseMatrix.shape[0],
                      output_dim=caseMatrix.shape[1],
                      weights=[caseMatrix],
                      trainable=True,
                      name='casing_emd'))

        mergeLayers = [tokens, casing]

        if self.additionalFeatures != None:
            for addFeature in self.additionalFeatures:
                maxAddFeatureValue = max([
                    max(sentence[addFeature])
                    for sentence in self.dataset['trainMatrix'] +
                    self.dataset['devMatrix'] + self.dataset['testMatrix']
                ])
                addFeatureEmd = Sequential()
                addFeatureEmd.add(
                    Embedding(input_dim=maxAddFeatureValue + 1,
                              output_dim=self.params['addFeatureDimensions'],
                              trainable=True,
                              name=addFeature + '_emd'))
                mergeLayers.append(addFeatureEmd)

        # :: Character Embeddings ::
        if params['charEmbeddings'] not in [
                None, "None", "none", False, "False", "false"
        ]:
            charset = self.dataset['mappings']['characters']
            charEmbeddingsSize = params['charEmbeddingsSize']
            maxCharLen = self.maxCharLen
            charEmbeddings = []
            for _ in charset:
                limit = math.sqrt(3.0 / charEmbeddingsSize)
                vector = np.random.uniform(-limit, limit, charEmbeddingsSize)
                charEmbeddings.append(vector)

            charEmbeddings[0] = np.zeros(charEmbeddingsSize)  # Zero padding
            charEmbeddings = np.asarray(charEmbeddings)

            chars = Sequential()
            chars.add(
                TimeDistributed(Embedding(input_dim=charEmbeddings.shape[0],
                                          output_dim=charEmbeddings.shape[1],
                                          weights=[charEmbeddings],
                                          trainable=True,
                                          mask_zero=True),
                                input_shape=(None, maxCharLen),
                                name='char_emd'))

            if params['charEmbeddings'].lower(
            ) == 'lstm':  # Use LSTM for char embeddings from Lample et al., 2016
                charLSTMSize = params['charLSTMSize']
                chars.add(
                    TimeDistributed(Bidirectional(
                        LSTM(charLSTMSize, return_sequences=False)),
                                    name="char_lstm"))
            else:  # Use CNNs for character embeddings from Ma and Hovy, 2016
                charFilterSize = params['charFilterSize']
                charFilterLength = params['charFilterLength']
                chars.add(
                    TimeDistributed(Convolution1D(charFilterSize,
                                                  charFilterLength,
                                                  border_mode='same'),
                                    name="char_cnn"))
                chars.add(
                    TimeDistributed(GlobalMaxPooling1D(), name="char_pooling"))

            mergeLayers.append(chars)
            if self.additionalFeatures == None:
                self.additionalFeatures = []

            self.additionalFeatures.append('characters')

        model = Sequential()
        model.add(Merge(mergeLayers, mode='concat'))
        for i in range(params['layers']):
            act = params["activation"]
            #act = LeakyReLU()
            #act = PReLU(init='zero',weights=None)
            if act not in ["leakyrelu..."]:  #act = LeakyReLU()
                # model.add(Bidirectional(
                #     LSTM(size, activation=params['activation'], return_sequences=True, dropout_W=params['dropout'][0],
                #          dropout_U=params['dropout'][1]), name="varLSTM_" + str(cnt)))

                model.add(
                    Bidirectional(
                        LSTM(units=params['LSTM-Size'][0],
                             activation=act,
                             recurrent_activation='hard_sigmoid',
                             recurrent_initializer=params['init'],
                             return_sequences=True,
                             dropout=params['dropout'][0],
                             recurrent_dropout=params['dropout'][1])))
            elif act == "leakyrelu" and False:
                model.add(
                    SimpleRNN(units=params['LSTM-Size'][0],
                              activation="linear",
                              recurrent_initializer=params['init'],
                              return_sequences=True,
                              dropout=params['dropout'][0],
                              recurrent_dropout=params['dropout'][1]))
                model.add(LeakyReLU(alpha=float(params["activation_flag"])))
            elif act == "prelu" and False:
                model.add(
                    SimpleRNN(units=params['LSTM-Size'][0],
                              activation="linear",
                              recurrent_initializer=params['init'],
                              return_sequences=True,
                              dropout=params['dropout'][0],
                              recurrent_dropout=params['dropout'][1]))
                model.add(PReLU(init='zero', weights=None))

        # Add LSTMs
        cnt = 1

        # Softmax Decoder
        if params['classifier'].lower() == 'softmax':
            model.add(
                TimeDistributed(Dense(len(
                    self.dataset['mappings'][self.labelKey]),
                                      activation='softmax'),
                                name='softmax_output'))
            lossFct = 'sparse_categorical_crossentropy'
        elif params['classifier'].lower() == 'crf':
            model.add(
                TimeDistributed(Dense(
                    len(self.dataset['mappings'][self.labelKey])),
                                name='hidden_layer'))
            crf = ChainCRF()
            model.add(crf)
            lossFct = crf.sparse_loss
        elif params['classifier'].lower() == 'tanh-crf':
            model.add(
                TimeDistributed(Dense(len(
                    self.dataset['mappings'][self.labelKey]),
                                      activation='tanh'),
                                name='hidden_layer'))
            crf = ChainCRF()
            model.add(crf)
            lossFct = crf.sparse_loss
        else:
            print("Please specify a valid classifier")
            assert (False)  # Wrong classifier

        optimizerParams = {}
        if 'clipnorm' in self.params and self.params[
                'clipnorm'] != None and self.params['clipnorm'] > 0:
            optimizerParams['clipnorm'] = self.params['clipnorm']

        if 'clipvalue' in self.params and self.params[
                'clipvalue'] != None and self.params['clipvalue'] > 0:
            optimizerParams['clipvalue'] = self.params['clipvalue']

        #if learning_rate in self.params:
        #    optimizerParams["learning_rate"] = self.params["learning_rate"]
        learning_rate = self.params["learning_rate"]

        if params['optimizer'].lower() == 'adam':
            opt = Adam(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'nadam':
            opt = Nadam(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'rmsprop':
            opt = RMSprop(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adadelta':
            opt = Adadelta(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adagrad':
            opt = Adagrad(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'sgd':
            opt = SGD(lr=learning_rate, **optimizerParams)
        elif params['optimizer'].lower() == 'adamax':
            opt = Adamax(lr=learning_rate, **optimizerParams)
        else:
            raise ValueError('Unknown optimizer: ' + str(params['optimizer']))

        model.compile(loss=lossFct, optimizer=opt)

        self.model = model
        if self.verboseBuild:
            model.summary()
            logging.debug(model.get_config())
            logging.debug("Optimizer: %s, %s" %
                          (str(type(opt)), str(opt.get_config())))
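A sketch of the `params` dict this builder reads (keys are taken from the code above; the values are illustrative assumptions, not the project's defaults):

params = {
    'charEmbeddings': None,            # or 'lstm' / 'cnn' to enable char features
    'charEmbeddingsSize': 30, 'charLSTMSize': 25,
    'charFilterSize': 30, 'charFilterLength': 3,
    'addFeatureDimensions': 10,
    'layers': 1, 'LSTM-Size': [100],
    'activation': 'tanh', 'activation_flag': 0.3,
    'dropout': (0.25, 0.25), 'init': 'glorot_uniform',
    'classifier': 'softmax',           # or 'crf' / 'tanh-crf'
    'optimizer': 'adam', 'learning_rate': 0.001,
    'clipnorm': None, 'clipvalue': None,
}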
コード例 #59
0
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2,
              validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test), 1,
                                         max_queue_size=2, verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1,
                                        max_queue_size=2)
    pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test),
                                                     K.variable(prediction))))

    assert(np.isclose(pred_loss, loss))
    assert(np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.weights  # Model should be built.

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
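`make_batches` above comes from Keras' training utilities; a functionally equivalent sketch of what it returns (a list of (start, end) index pairs):

def make_batches(size, batch_size):
    return [(start, min(size, start + batch_size))
            for start in range(0, size, batch_size)]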
コード例 #60
0
def test_lambda():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    def func(X):
        s = X[0]
        for i in range(1, len(X)):
            s += X[i]
        return s

    def activation(X):
        return K.softmax(X)

    def output_shape(input_shapes):
        return input_shapes[0]

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(LambdaMerge([left, right], function=func,
                          output_shape=output_shape))
    model.add(Dense(nb_class))
    model.add(Lambda(activation))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_lambda_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(LambdaMerge([left, right], function=func,
                          output_shape=output_shape))
    model.add(Dense(nb_class))
    model.add(Lambda(activation))
    model.load_weights(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)