Example #1
def feed_test(inp_layers,
              oup_layers,
              expected_output,
              num_params,
              multi_output=False):
    inp_layers = to_list(inp_layers)
    oup_layers = to_list(oup_layers)
    model = Model(inp_layers, oup_layers)
    model.compile('sgd', ['mse'] * len(oup_layers))

    pred = model.predict([np.array([[1]])] * len(inp_layers))
    if not multi_output:
        expected_output = [expected_output]

    for p, e in zip(pred, expected_output):
        assert_allclose(p, e)

    # use caller_name to avoid race condition when conducting parallel testing
    caller_name = inspect.stack()[1][3]
    arch_fname = '/tmp/arch_{}.json'.format(caller_name)
    weight_fname = '/tmp/weight_{}.hkl'.format(caller_name)
    model.compile('sgd', ['mse'] * len(oup_layers))
    if len(model.trainable_params) == 0:
        weight_fname = None
    model.save_to_file(arch_fname, weight_fname, overwrite=True, indent=2)
    model2 = Model.load_from_file(arch_fname, weight_fname)
    model2.compile('sgd', ['mse'] * len(oup_layers))

    assert len(model.trainable_params) == len(
        model2.trainable_params) == num_params

    for p1, p2 in zip(model.trainable_params, model2.trainable_params):
        assert_allclose(B.eval(p1), B.eval(p2))

    for r1, r2 in zip(model.regularizers.values(),
                      model2.regularizers.values()):
        assert str(serialize(r1)) == str(serialize(r2))

    for c1, c2 in zip(model.constraints.values(), model2.constraints.values()):
        assert str(serialize(c1)) == str(serialize(c2))
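
# --- Hedged usage sketch (not part of the original suite) ---
# The Activation layer and the values below are illustrative assumptions:
# feed_test() drives predict() with [[1]] per input, so a parameter-free
# linear pass-through should echo [[1]] and expose zero trainable params.
def test_linear_feed():
    inp = Input(1)
    out = Activation('linear')(inp)  # assumed pass-through layer
    feed_test(inp, out, np.array([[1]]), num_params=0)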
Example #2
def test_fit_evaluate_predict_spec():
    x = np.array([[1], [1]])
    y = np.array([[1], [1]])

    input1 = Input(1, name='input1')
    input2 = Input(1, name='input2')
    output1 = Dense(1, name='output1')(input1)
    output2 = Dense(1, name='output2')(input2)

    model0 = Model(input1, output1)
    model1 = Model([input1, input2], [output1, output2])
    model2 = Model([input1, input2], [output1, output2])

    model0.compile('sgd', 'mse', metrics=['acc'])
    model1.compile('sgd', loss=['mse', 'mse'], metrics=['acc'])
    model2.compile('sgd',
                   loss={
                       'output1': 'mse',
                       'output2': 'mse'
                   },
                   metrics={
                       'output1': 'acc',
                       'output2': 'acc'
                   })

    model0.predict([1, 1])
    model0.evaluate([1, 1], [1, 1])
    model0.fit([1, 1], [1, 1], nb_epoch=1)

    model1.predict([x, x])
    model1.evaluate([x, x], [y, y])
    model1.fit([x, x], [y, y], nb_epoch=1)

    model2.predict({'input1': x, 'input2': x})
    model2.evaluate({'input1': x, 'input2': x}, {'output1': y, 'output2': y})
    model2.fit({
        'input1': x,
        'input2': x
    }, {
        'output1': y,
        'output2': y
    },
               nb_epoch=1)
Example #3
def compile_model(inputs, outputs):
    model = Model(inputs, outputs)
    model.compile('sgd', 'mse')
    return model  # hand the compiled model back to the caller
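
# Hedged usage of the helper above; the Input/Dense layers are illustrative
# assumptions, and the compiled model is assumed to be returned to the caller.
inp = Input(1, name='inp')
out = Dense(1, name='out')(inp)
model = compile_model(inp, out)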
Example #4
def layer_test(layer,
               inp_vals,
               exp_output=None,
               random_exp={},
               multi_input=False,
               debug=False,
               input_args={},
               test_serialization=True,
               train_mode=True):
    if multi_input:
        input_vals = []
        for val in inp_vals:
            input_vals.append(np.asarray(val))
    else:
        input_vals = [np.asarray(inp_vals)]

    if exp_output is not None:
        exp_output = np.asarray(exp_output)

    if 'shape' in input_args:
        if 'batch_size' in input_args:
            input_shapes = [(input_args['batch_size'], ) + input_args['shape']]
        else:
            input_shapes = [(None, ) + input_args['shape']]
        del input_args['shape']
    else:
        input_shapes = [val.shape for val in input_vals]
    input_layers = [Input(shape[1:], **input_args) for shape in input_shapes]

    model = Model(input_layers, layer(unlist_if_one(input_layers)))
    model.compile('sgd', 'mse')

    output = model.predict(input_vals, train_mode=train_mode)[
        0]  # result of the first (and the only) output channel
    output_shape = layer.output_shape(unlist_if_one(input_shapes))

    # check output(), output_shape() implementation
    cls_name = layer.__class__.__name__

    if debug:
        print(cls_name)
        if exp_output is not None:
            print("Expected Output:\n{}".format(exp_output))
        print("Output:\n{}".format(output))
        if exp_output is not None:
            print("Expected Output Shape:\n{}".format(exp_output.shape))
        print("Output shape:\n{}".format(output_shape))
        if debug == 2:
            import sys
            sys.exit(-1)

    if exp_output is not None:
        assert_allclose(
            output,
            exp_output,
            err_msg='===={}.output() incorrect!====\n'.format(cls_name))
        if None in output_shape:
            assert output_shape[0] is None
            assert_allclose(
                output_shape[1:],
                exp_output.shape[1:],
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
        else:
            assert_allclose(
                output_shape,
                exp_output.shape,
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
    else:
        # No exp_output given; check that the output's shape matches the one reported by the output_shape() function.
        if None in output_shape:
            assert output_shape[0] is None
            assert_allclose(
                output_shape[1:],
                output.shape[1:],
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))
        else:
            assert_allclose(
                output_shape,
                output.shape,
                err_msg='===={}.output_shape() incorrect!===='.format(
                    cls_name))

    lim = 1e-2
    if 'std' in random_exp:
        assert abs(output.std() - random_exp['std']) < lim
    if 'mean' in random_exp:
        assert abs(output.mean() - random_exp['mean']) < lim
    if 'max' in random_exp:
        assert abs(output.max() - random_exp['max']) < lim
    if 'min' in random_exp:
        assert abs(output.min() - random_exp['min']) < lim

    if test_serialization:
        # check if layer is ok for serialization
        arch_fname = '/tmp/arch_{}.json'.format(cls_name)
        weight_fname = '/tmp/weight_{}.hkl'.format(cls_name)
        if len(model.trainable_params) == 0:
            weight_fname = None

        model.save_to_file(arch_fname, weight_fname, overwrite=True, indent=2)
        try:
            model2 = Model.load_from_file(arch_fname, weight_fname)
        except Exception:
            assert False, '====Reconstruction of the model fails. "{}" serialization problem!===='.format(
                cls_name)

        model2.compile('sgd', 'mse')
        model2_output = model2.predict(input_vals, train_mode=train_mode)[0]

        if len(random_exp) == 0:
            assert_allclose(
                output,
                model2_output,
                err_msg=
                '====Reconstructed model predicts differently. "{}" serialization problem!====\n'
                .format(cls_name))
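
# --- Hedged usage sketch (illustrative, not from the original suite) ---
# Assumes the library ships a parameter-free Activation layer: a ReLU should
# clip negatives and keep positives, which layer_test() then cross-checks
# against output_shape() and the save/load round trip.
layer_test(Activation('relu'),
           inp_vals=[[-1., 0., 2.]],
           exp_output=[[0., 0., 2.]])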
Example #5
row, col, pixel = X_train.shape[1:]

# 4D input.
x = Input(shape=(row, col, pixel))

# Encodes a row of pixels using TimeDistributed Wrapper.
encoded_rows = TimeDistributed(LSTM(output_dim=row_hidden))(x)

# Encodes columns of encoded rows.
encoded_columns = LSTM(col_hidden)(encoded_rows)

# Final predictions and model.
prediction = Dense(nb_classes, activation='softmax')(encoded_columns)
model = Model(inputs=x, outputs=prediction)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# Training.
model.fit(X_train,
          y_train,
          batch_size=batch_size,
          nb_epoch=nb_epochs,
          verbose=1,
          validation_data=(X_test, y_test))

# Evaluation.
scores = model.evaluate(X_test, y_test)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
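
# Hedged note: the snippet above assumes X_train/X_test are already 4D
# (samples, row, col, pixel) and that y_train/y_test are one-hot encoded.
# A minimal MNIST-style preparation under those assumptions (28x28 grayscale
# images, pure NumPy, illustrative only):
import numpy as np

X_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype('float32') / 255
y_train = np.eye(nb_classes)[y_train]  # one-hot encode integer class labels
y_test = np.eye(nb_classes)[y_test]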