Example #1
def test_shared_nested_sequential():
    input1 = Input(1)
    input2 = Input(1)
    in_seq = Sequential([dense(**params), Activation('linear')])
    seq = Sequential(
        [in_seq, dense(initial_weights=[W2, b2]),
         Activation('linear')])
    output = ElementWiseSum()([seq(input1), seq(input2)])
    feed_test([input1, input2], output, 2 * wpb2, 4)
Example #2
def test_multi_input_output():
    i1 = Input(1)
    i2 = Input(1)
    i3 = Input(1)
    i4 = Input(1)
    o1 = ElementWiseSum()([i1, i2])
    o2 = ElementWiseSum()([i3, i4])

    feed_test([i1, i2, i3, i4], [o1, o2],
              [np.array([[2]]), np.array([[2]])],
              0,
              multi_output=True)
Example #3
def test_check_input_shape():
    # A class inheriting Layer does not allow multiple inputs
    with pytest.raises(KError):
        Sequential(Dense(1)([Input(1), Input(1)]))

    # Input dimension mismatch
    with pytest.raises(KError):
        input1 = Input((1, 1, 1))
        Dense(1)(input1)

    # A multiple-input layer by default accepts only equal-shape inputs
    with pytest.raises(KError):
        input1 = Input(1)
        input2 = Input((1, 1))
        ElementWiseSum()([input1, input2])
Example #4
def test_fit():
    # forget to compile
    inp1 = Input(1, batch_size=1)
    d1 = Dense(1)
    model = Model(inp1, d1)
    with pytest.raises(KError):
        model.fit([[1]], [[1]])
Example #5
def test_nested_sequential():
    input1 = Input(1)
    in_seq = Sequential([dense(**params), Activation('linear')])
    output = Sequential(
        [in_seq, dense(initial_weights=[W2, b2]),
         Activation('linear')])(input1)
    feed_test(input1, output, wpb2, 4)
Example #6
def test_fit_evaluate_predict_spec():
    x = np.array([[1], [1]])
    y = np.array([[1], [1]])

    input1 = Input(1, name='input1')
    input2 = Input(1, name='input2')
    output1 = Dense(1, name='output1')(input1)
    output2 = Dense(1, name='output2')(input2)

    model0 = Model(input1, output1)
    model1 = Model([input1, input2], [output1, output2])
    model2 = Model([input1, input2], [output1, output2])

    model0.compile('sgd', 'mse', metrics=['acc'])
    model1.compile('sgd', loss=['mse', 'mse'], metrics=['acc'])
    model2.compile('sgd',
                   loss={
                       'output1': 'mse',
                       'output2': 'mse'
                   },
                   metrics={
                       'output1': 'acc',
                       'output2': 'acc'
                   })

    model0.predict([1, 1])
    model0.evaluate([1, 1], [1, 1])
    model0.fit([1, 1], [1, 1], nb_epoch=1)

    model1.predict([x, x])
    model1.evaluate([x, x], [y, y])
    model1.fit([x, x], [y, y], nb_epoch=1)

    model2.predict({'input1': x, 'input2': x})
    model2.evaluate({'input1': x, 'input2': x}, {'output1': y, 'output2': y})
    model2.fit({
        'input1': x,
        'input2': x
    }, {
        'output1': y,
        'output2': y
    },
               nb_epoch=1)
Example #7
def test_compile():
    inp1 = Input(1, batch_size=1)
    inp2 = Input(1, batch_size=2)
    d1 = Dense(1)
    d2 = Dense(1)

    def compile_model(inputs, outputs):
        model = Model(inputs, outputs)
        model.compile('sgd', 'mse')

    # input should be Input type
    with pytest.raises(KError):
        compile_model(d1, d2)

    with pytest.raises(KError):
        compile_model('whatever', d2)

    # output should be of Kensor type
    with pytest.raises(KError):
        compile_model(inp1, d2)

    # batch_size conflict
    with pytest.raises(KError):
        compile_model([inp1, inp2], d2)
Example #8
def test_feed_exceptions():

    # Forget to feed d1
    with pytest.raises(KError):
        d1 = Dense(1)
        Dense(1)(d1)

    # The first layer of a Sequential model should be an Input layer
    with pytest.raises(KError):
        s1 = Sequential([Dense(1)])
        s1.compile('sgd', 'mse')

    # Recursive feeding
    with pytest.raises(KError):
        input1 = Input(1)
        d = Dense(1)
        d1 = d(input1)
        d(d1)

    # Recursive feeding
    with pytest.raises(KError):
        i1 = Input(1)
        i2 = Input(1)
        i3 = Input(1)
        i4 = Input(1)
        m = ElementWiseSum()
        m1 = m([i1, i2])
        m2 = m([i3, i4])
        m([m1, m2])  # m's own outputs are fed back into m

    # shape should be passed as a tuple, e.g. Input((1, 2))
    with pytest.raises(KError):
        input1 = Input(1, 2)

    # You should not feed an Input layer
    with pytest.raises(KError):
        input1 = Input(1)(Input(1))
Example #9
def test_embedding():
    vocab_size = 5
    output_dim = 3

    W = np.random.rand(vocab_size, output_dim)

    layer_test(
        embeddings.Embedding(vocab_size, output_dim, initial_weights=[W]),
        [[0, 1, 2, 3, 4]], [W])

    # test dropout; just check that it runs without error
    layer_test(embeddings.Embedding(vocab_size, output_dim, dropout=0.2),
               [[0, 1, 2, 3, 4]],
               test_serialization=False)

    # test Embedding's support of mask
    input1 = Input(5, dtype='int32', mask_value=0)
    emb_oup = embeddings.Embedding(vocab_size, output_dim)(input1)
    assert emb_oup.tensor._keraflow_mask is not None
Example #10
def test_sgd():
    ''' math:
    Let W = [A, B], b = [C, D], y = [E, F]
    MSE = 1/2*[(A+C-E)^2 + (B+D-F)^2]
    dA, dB, dC, dD = (A+C-E), (B+D-F), (A+C-E), (B+D-F)
    Assume E = 2*(A+C), F = 2*(B+D)
    dA, dB, dC, dD = -(A+C), -(B+D), -(A+C), -(B+D)
    A-=lr*dA, B-=lr*dB, C-=lr*dC, D-=lr*dD
    '''
    lr = 0.01
    W = np.array([[1, 2]])
    b = np.array([3, 4])
    wpb = W+b
    model = Sequential([Input(1), Dense(2, initial_weights=[W, b])])
    optimizer = SGD(lr=lr)
    model.compile(optimizer, 'mse')
    model.fit([1], 2*wpb, nb_epoch=1)
    expectedW = W+lr*wpb
    expectedb = (b+lr*wpb).reshape((2,))
    assert_allclose(B.eval(model.layers[1].W), expectedW)
    assert_allclose(B.eval(model.layers[1].b), expectedb)
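
As a sanity check on the docstring math, the expected update can be reproduced by hand with plain numpy (an illustration only; it assumes the same W, b and lr as above and the 1/2 * squared-error convention stated in the docstring):

import numpy as np

lr = 0.01
W = np.array([[1, 2]], dtype=float)   # [A, B]
b = np.array([3, 4], dtype=float)     # [C, D]
wpb = W + b                           # [[A+C, B+D]] = [[4, 6]]
y = 2 * wpb                           # targets [E, F]

# Gradients from the docstring: dA = (A+C-E) = -(A+C), etc.
dW = wpb - y                          # = -wpb
db = (wpb - y).reshape((2,))

print(W - lr * dW)                    # [[1.04, 2.06]] -> expectedW
print(b - lr * db)                    # [3.04, 4.06]   -> expectedb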
Example #11
def test_simplernn():
    def simplernn(W, U):
        def call(xt, ytm1):
            h = np.dot(xt, W)
            yt = h + np.dot(ytm1, U)
            return yt, [yt]

        return call

    W = np.ones((input_dim, output_dim))
    U = np.ones((output_dim, output_dim))
    run_test(recurrent.SimpleRNN,
             simplernn(W, U),
             num_states=1,
             activation='linear',
             initial_weights=[W, U])

    # test mask
    # we only test masking on SimpleRNN, since the mask handling does not depend on the specific RNN type.
    from keraflow.models import Sequential
    from keraflow.layers import Input, Embedding
    vocab_size = origin.shape[0]
    emb_dim = origin.shape[1]
    if B.name() == 'tensorflow':
        input_length = origin.shape[0]
    else:
        input_length = None

    model = Sequential([])
    model.add(Input(input_length, mask_value=1))
    model.add(Embedding(vocab_size, emb_dim, initial_weights=origin))
    model.add(
        recurrent.SimpleRNN(output_dim,
                            initial_weights=[W, U],
                            activation='linear'))
    model.compile('sgd', 'mse')
    exp_output = rnn([origin[:1]], simplernn(W, U), output_dim, num_states=1)
    assert_allclose(exp_output, model.predict([[0, 1]]))
    if input_length is None:
        assert_allclose(exp_output, model.predict([[0]]))
Example #12
def test_shared_sequential():
    input1 = Input(1)
    input2 = Input(1)
    shared = Sequential([dense(**params), dense(initial_weights=[W2, b2])])
    output = ElementWiseSum()([shared(input1), shared(input2)])
    feed_test([input1, input2], output, 2 * wpb2, 4)
Example #13
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keraflow_cifar10_trained_model.json'
weight_name = 'keraflow_cifar10_trained_weights.hkl'

# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Convert class vectors to binary class matrices.
y_train = keraflow.utils.to_categorical(y_train, num_classes)
y_test = keraflow.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Input(x_train.shape[1:]))
model.add(Convolution2D(32, 3, 3, pad='same'))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(Pooling2D('max', pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Convolution2D(64, 3, 3, pad='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(Pooling2D('max', pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
Example #14
def test_sequential_multi_input():
    input1 = Input(1)
    input2 = Input(1)
    output = Sequential([ElementWiseSum(), dense(**params)])([input1, input2])
    feed_test([input1, input2], output, 2 * W + b, 2)
Example #15
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)

# import numpy as np
# X_train = np.concatenate((X_train[:100],X_train[-100:]), axis=0)
# y_train = np.concatenate((y_train[:100],y_train[-100:]), axis=0)
# X_test = np.concatenate((X_test[:10],X_test[-10:]), axis=0)
# y_test = np.concatenate((y_test[:10],y_test[-10:]), axis=0)

print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()

model.add(Input(maxlen))

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, dropout=0.2))

# we add a Convolution1D, which will learn nb_kernel
# word group filters of size kernel_row:
model.add(
    Convolution1D(nb_kernel=nb_kernel,
                  kernel_row=kernel_row,
                  pad='valid',
                  activation='relu',
                  stride=1))
# we use max pooling:
model.add(Pooling1D('max', pool_length=maxlen - 2))
Example #16
def test_sequential_layer():
    input1 = Input(1)
    output = Sequential([dense(**params),
                         dense(initial_weights=[W2, b2])])(input1)
    feed_test(input1, output, wpb2, 4)
Example #17
def test_sequential_as_input():
    seq = Sequential([Input(1), dense(**params)])
    output = dense(initial_weights=[W2, b2])(seq)
    feed_test(seq, output, wpb2, 4)
Example #18
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()
model.add(Input(784))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

history = model.fit(X_train, y_train,
                    batch_size=batch_size, nb_epoch=nb_epoch,
Example #19
def create_model(layer):
    model = Sequential([Input(1), layer])
    model.compile('sgd', 'mse')
    return model
Example #20
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()

model.add(Input((1, img_rows, img_cols)))
model.add(Convolution2D(nb_kernel, kernel_size[0], kernel_size[1],
                        pad='valid'))
model.add(Activation('relu'))
model.add(Convolution2D(nb_kernel, kernel_size[0], kernel_size[1]))
model.add(Activation('relu'))
model.add(Pooling2D('max', pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
Example #21
batch_size = 32

print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')

print('Pad sequences (samples x time)')
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)

print('Build model...')
model = Sequential()
model.add(Input(None, dtype='int32'))
model.add(Embedding(max_features, 128))
model.add(LSTM(128, dropout_W=0.2, dropout_U=0.2))  # try using a GRU instead, for fun
model.add(Dense(1))
model.add(Activation('sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=15,
          validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
Example #22
def create_model(**kwargs):
    model = Sequential([Input(1), Dense(2, **kwargs)])
    model.compile('sgd', 'mse')
    return model
Example #23
def test_single_input():
    input1 = Input(1)
    output = dense(**params)(input1)
    feed_test(input1, output, wpb, 2)
Example #24
def layer_test(layer,
               inp_vals,
               exp_output=None,
               random_exp={},
               multi_input=False,
               debug=False,
               input_args={},
               test_serialization=True,
               train_mode=True):
    if multi_input:
        input_vals = []
        for val in inp_vals:
            input_vals.append(np.asarray(val))
    else:
        input_vals = [np.asarray(inp_vals)]

    if exp_output is not None:
        exp_output = np.asarray(exp_output)

    if 'shape' in input_args:
        if 'batch_size' in input_args:
            input_shapes = [(input_args['batch_size'], ) + input_args['shape']]
        else:
            input_shapes = [(None, ) + input_args['shape']]
        del input_args['shape']
    else:
        input_shapes = [val.shape for val in input_vals]
    input_layers = [Input(shape[1:], **input_args) for shape in input_shapes]

    model = Model(input_layers, layer(unlist_if_one(input_layers)))
    model.compile('sgd', 'mse')

    # result of the first (and only) output channel
    output = model.predict(input_vals, train_mode=train_mode)[0]
    output_shape = layer.output_shape(unlist_if_one(input_shapes))

    # check output(), output_shape() implementation
    cls_name = layer.__class__.__name__

    if debug:
        print(cls_name)
        if exp_output is not None:
            print("Expected Output:\n{}".format(exp_output))
        print("Output:\n{}".format(output))
        if exp_output is not None:
            print("Expected Output Shape:\n{}".format(exp_output.shape))
        print("Output shape:\n{}".format(output_shape))
        if debug == 2:
            import sys
            sys.exit(-1)

    if exp_output is not None:
        assert_allclose(
            output,
            exp_output,
            err_msg='===={}.output() incorrect!====\n'.format(cls_name))
        if None in output_shape:
            assert output_shape[0] is None
            assert_allclose(
                output_shape[1:],
                exp_output.shape[1:],
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
        else:
            assert_allclose(
                output_shape,
                exp_output.shape,
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
    else:
        # No exp_output; check that the output's shape matches the one returned by the output_shape() function.
        if None in output_shape:
            assert output_shape[0] is None
            assert_allclose(
                output_shape[1:],
                output.shape[1:],
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
        else:
            assert_allclose(
                output_shape,
                output.shape,
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))

    lim = 1e-2
    if 'std' in random_exp:
        assert abs(output.std() - random_exp['std']) < lim
    if 'mean' in random_exp:
        assert abs(output.mean() - random_exp['mean']) < lim
    if 'max' in random_exp:
        assert abs(output.max() - random_exp['max']) < lim
    if 'min' in random_exp:
        assert abs(output.min() - random_exp['min']) < lim

    if test_serialization:
        # check if layer is ok for serialization
        arch_fname = '/tmp/arch_{}.json'.format(cls_name)
        weight_fname = '/tmp/weight_{}.hkl'.format(cls_name)
        if len(model.trainable_params) == 0:
            weight_fname = None

        model.save_to_file(arch_fname, weight_fname, overwrite=True, indent=2)
        try:
            model2 = Model.load_from_file(arch_fname, weight_fname)
        except:
            assert False, '====Reconstruction of the model fails. "{}" serialization problem!===='.format(
                cls_name)

        model2.compile('sgd', 'mse')
        model2_output = model2.predict(input_vals, train_mode=train_mode)[0]

        if len(random_exp) == 0:
            assert_allclose(
                output,
                model2_output,
                err_msg=
                '====Reconstructed model predicts different. "{}" serialization problem!====\n'
                .format(cls_name))
Example #25
def test_shared_layer():
    input1 = Input(1)
    input2 = Input(1)
    shared = dense(**params)
    output = ElementWiseSum()([shared(input1), shared(input2)])
    feed_test([input1, input2], output, 2 * wpb, 2)
Example #26
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# Converts class vectors to binary class matrices.
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)

row, col, pixel = X_train.shape[1:]

# 4D input.
x = Input(shape=(row, col, pixel))

# Encodes a row of pixels using TimeDistributed Wrapper.
encoded_rows = TimeDistributed(LSTM(output_dim=row_hidden))(x)

# Encodes columns of encoded rows.
encoded_columns = LSTM(col_hidden)(encoded_rows)

# Final predictions and model.
prediction = Dense(nb_classes, activation='softmax')(encoded_columns)
model = Model(inputs=x, outputs=prediction)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Training.