Example 1
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 2
def test_saving_multiple_metrics_outputs():
    inputs = Input(shape=(5,))
    x = Dense(5)(inputs)
    output1 = Dense(1, name='output1')(x)
    output2 = Dense(1, name='output2')(x)

    model = Model(inputs=inputs, outputs=[output1, output2])

    metrics = {'output1': ['mse', 'binary_accuracy'],
               'output2': ['mse', 'binary_accuracy']
               }
    loss = {'output1': 'mse', 'output2': 'mse'}

    model.compile(loss=loss, optimizer='sgd', metrics=metrics)

    # ensure that the model is working
    x = np.array([[1, 1, 1, 1, 1]])
    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 3
def test_model_saving_to_pre_created_h5py_file():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test non-default options in h5
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 4
def test_sequential_model_saving_2():
    # test with custom optimizer, loss
    custom_opt = optimizers.rmsprop
    custom_loss = losses.mse
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(Dense(3))
    model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    out = model.predict(x)

    load_kwargs = {'custom_objects': {'custom_opt': custom_opt,
                                      'custom_loss': custom_loss}}
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    new_model_disk = load_model(fname, **load_kwargs)
    os.remove(fname)

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(filename=fname)
        save_model(model, gcs_filepath)
        file_io_proxy.assert_exists(gcs_filepath)
        new_model_gcs = load_model(gcs_filepath, **load_kwargs)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup

    for new_model in [new_model_disk, new_model_gcs]:
        new_out = new_model.predict(x)
        assert_allclose(out, new_out, atol=1e-05)
Example 5
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
Example 6
def test_functional_model_saving():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    new_model_disk = load_model(fname)
    os.remove(fname)

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(filename=fname)
        save_model(model, gcs_filepath)
        file_io_proxy.assert_exists(gcs_filepath)
        new_model_gcs = load_model(gcs_filepath)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup

    for new_model in [new_model_disk, new_model_gcs]:
        new_out = new_model.predict(x)
        assert_allclose(out, new_out, atol=1e-05)
Example 7
def test_sequential_model_saving_2():
    # test with funkier config
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(RepeatVector(3))
    model.add(TimeDistributed(Dense(3)))
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy],
                  sample_weight_mode='temporal')
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 8
def test_saving_without_compilation():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 9
def test_model_save_load_binary_in_memory():
    model, x = _get_sample_model_and_input()
    out = model.predict(x)

    stream = io.BytesIO()
    save_model(model, stream)
    stream.seek(0)
    loaded_model = load_model(stream)
    out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
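The `_get_sample_model_and_input` helper used here (and again in Examples 16, 17, 24 and 53) is not shown in this listing. A plausible sketch, mirroring the small functional model that Example 3 builds inline:

import numpy as np
from keras import losses, metrics, optimizers
from keras.layers import Dense, Input
from keras.models import Model

def _get_sample_model_and_input():
    # Build and briefly train a small functional model, and return it
    # together with a sample input for round-trip prediction checks.
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)
    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    return model, x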
Example 10
def test_saving_without_compilation():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 11
def test_saving_without_compilation():
    """Test saving model without compiling.
    """
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(Dense(3))

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 12
def test_saving_right_after_compilation():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
    model.model._make_train_function()

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 13
def test_saving_right_after_compilation():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
    model.model._make_train_function()

    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 14
def test_saving_unused_layers_is_ok():
    a = Input(shape=(256, 512, 6))
    b = Input(shape=(256, 512, 1))
    c = Lambda(lambda x: x[:, :, :, :1])(a)

    model = Model(inputs=[a, b], outputs=c)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    load_model(fname)
    os.remove(fname)
Example 15
def test_saving_constant_initializer_with_numpy():
    """Test saving and loading model of constant initializer with numpy ndarray as input.
    """
    model = Sequential()
    model.add(Dense(2, input_shape=(3,), kernel_initializer=Constant(np.ones((3, 2)))))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model = load_model(fname)
    os.remove(fname)
Example 16
def test_model_loading_from_binary_stream():
    model, x = _get_sample_model_and_input()
    out = model.predict(x)

    with temp_filename('h5') as fname:
        # save the model the usual way
        with h5py.File(fname, mode='w') as h5file:
            save_model(model, h5file)
        # Load the data binary, and make sure the model is intact.
        with open(fname, 'rb') as raw_file:
            loaded_model = load_model(raw_file)
    out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
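The `temp_filename` context manager used in this example and the next is also not part of the listing. A minimal sketch, assuming it only needs to yield a disposable path with the given extension and delete the file on exit:

import os
import tempfile
from contextlib import contextmanager

@contextmanager
def temp_filename(extension):
    # Yield a temporary '.<extension>' path and remove the file
    # once the with-block exits.
    fd, fname = tempfile.mkstemp('.' + extension)
    os.close(fd)
    try:
        yield fname
    finally:
        if os.path.exists(fname):
            os.remove(fname)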
Example 17
def test_model_saving_to_binary_stream():
    model, x = _get_sample_model_and_input()
    out = model.predict(x)

    with temp_filename('h5') as fname:
        # save directly to binary file
        with open(fname, 'wb') as raw_file:
            save_model(model, raw_file)
        # Load the data the usual way, and make sure the model is intact.
        with h5py.File(fname, mode='r') as h5file:
            loaded_model = load_model(h5file)
    out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 18
def save_model_to_hdf5_group(model, f):
    # Use Keras save_model to save the full model (including optimizer
    # state) to a file.
    # Then we can embed the contents of that HDF5 file inside ours.
    tempfd, tempfname = tempfile.mkstemp(prefix='tmp-betago')
    try:
        os.close(tempfd)
        save_model(model, tempfname)
        serialized_model = h5py.File(tempfname, 'r')
        root_item = serialized_model.get('/')
        serialized_model.copy(root_item, f, 'kerasmodel')
        serialized_model.close()
    finally:
        os.unlink(tempfname)
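The matching loader is not included in this example. A sketch of the reverse operation, assuming the 'kerasmodel' group name chosen by the saver above: copy the embedded group (and its attributes) back into a standalone HDF5 file and hand that to `load_model`.

import os
import tempfile

import h5py
from keras.models import load_model

def load_model_from_hdf5_group(f, custom_objects=None):
    # Extract the embedded 'kerasmodel' group into its own temporary
    # HDF5 file, then load it with the regular Keras machinery.
    tempfd, tempfname = tempfile.mkstemp(prefix='tmp-betago')
    try:
        os.close(tempfd)
        serialized_model = h5py.File(tempfname, 'w')
        root_item = f.get('kerasmodel')
        for attr_name, attr_value in root_item.attrs.items():
            serialized_model.attrs[attr_name] = attr_value
        for k in root_item.keys():
            f.copy(root_item.get(k), serialized_model, k)
        serialized_model.close()
        return load_model(tempfname, custom_objects=custom_objects)
    finally:
        os.unlink(tempfname)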
Example 19
def test_saving_lambda_numpy_array_arguments():
    mean = np.random.random((4, 2, 3))
    std = np.abs(np.random.random((4, 2, 3))) + 1e-5
    inputs = Input(shape=(4, 2, 3))
    outputs = Lambda(lambda image, mu, std: (image - mu) / std,
                     arguments={'mu': mean, 'std': std})(inputs)
    model = Model(inputs, outputs)
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    assert_allclose(mean, model.layers[1].arguments['mu'])
    assert_allclose(std, model.layers[1].arguments['std'])
Example 20
def test_saving_model_with_long_weights_names():
    x = Input(shape=(2, ), name='nested_model_input')
    f = x
    for i in range(4):
        f = Dense(2, name='nested_model_dense_%d' % (i, ))(f)
    # This layer name will make the `weights_name`
    # HDF5 attribute blow out of proportion.
    f = Dense(2, name='nested_model_output' + ('x' * (2**15)))(f)
    nested_model = Model(inputs=[x], outputs=[f], name='nested_model')

    x = Input(shape=(2, ), name='outer_model_input')
    f = nested_model(x)
    f = Dense(2, name='outer_model_output')(f)

    model = Model(inputs=[x], outputs=[f])

    model.compile(loss='mse', optimizer='adam', metrics=['acc'])

    x = np.random.random((1, 2))
    y = np.random.random((1, 2))
    model.train_on_batch(x, y)

    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)

    # Check that the HDF5 file contains a chunked array
    # of weight names.
    with h5py.File(fname, 'r') as h5file:
        attrs = [
            attr for attr in h5file['model_weights']['nested_model'].attrs
            if attr.startswith('weight_names')
        ]
        n_weight_names_arrays = len(attrs)

    os.remove(fname)

    # The chunking of the weight names array should have happened.
    assert n_weight_names_arrays > 0

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
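The chunking this test asserts on exists because HDF5 rejects any single attribute larger than 64 KiB under its default attribute storage. A small sketch of the failure mode that the chunked `weight_names_0`, `weight_names_1`, ... attributes work around (the in-memory driver keeps the demo off disk):

import h5py
import numpy as np

# One 32 KiB name fits on its own, but h5py stores a list of names as a
# fixed-width string array, so four such names cost 4 x 32 KiB and
# overflow the 64 KiB per-attribute limit.
with h5py.File('does not matter', mode='w', driver='core',
               backing_store=False) as f:
    names = np.array([('x' * (2 ** 15)).encode('utf8')] * 4)
    try:
        f.attrs['weight_names'] = names
    except (RuntimeError, OSError) as err:
        print('attribute rejected:', err)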
Example 21
def test_functional_model_saving():
    img_rows, img_cols = 32, 32
    img_channels = 3

    # Parameters for the DenseNet model builder
    img_dim = (img_channels, img_rows,
               img_cols) if K.image_data_format() == 'channels_first' else (
                   img_rows, img_cols, img_channels)
    depth = 40
    nb_dense_block = 3
    growth_rate = 3  # number of z2 maps equals growth_rate * group_size, so keep this small.
    nb_filter = 16
    dropout_rate = 0.0  # 0.0 for data augmentation
    conv_group = 'D4'  # C4 includes 90 degree rotations, D4 additionally includes reflections in x and y axis.
    use_gcnn = True

    # Create the model (without loading weights)
    model = GDenseNet(mc_dropout=False,
                      padding='same',
                      nb_dense_block=nb_dense_block,
                      growth_rate=growth_rate,
                      nb_filter=nb_filter,
                      dropout_rate=dropout_rate,
                      weights=None,
                      input_shape=img_dim,
                      depth=depth,
                      use_gcnn=use_gcnn,
                      conv_group=conv_group)
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 32, 32, 3))
    y = np.random.randint(0, 10, 1)
    y = np_utils.to_categorical(y, 10)
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 22
def cnn_lstm_main(col='toxic', epochs=2, ver=0, mode='val'):
    embedding_dims = 128
    filters = 64
    kernel_size = 5
    lstm_output_size = 64
    batch_size = 32
    dropout = 0.5

    print('loading data')
    df_train = pd.read_csv(util.train_data)
    y = df_train[col].values
    if ver == 1:
        X = pickle.load(open(util.tmp_padded_seq_train_ver1, 'rb'))
    else:
        X = pickle.load(open(util.tmp_padded_seq_train, 'rb'))
    print(X.shape, y.shape)

    model = Sequential()
    model.add(
        Embedding(util.num_words, embedding_dims, input_length=util.maxlen))
    model.add(Dropout(dropout))
    model.add(Conv1D(filters, kernel_size, activation='relu'))
    model.add(MaxPooling1D())

    model.add(LSTM(lstm_output_size))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('training cnn_lstm model for %s...' % col)
    if mode == 'val':
        tb = TensorBoard(log_dir=util.cnn_lstm_ver0_log, histogram_freq=1)
        model.fit(X, y, batch_size=batch_size, epochs=epochs,
                  validation_split=0.2, verbose=2, callbacks=[tb])
    else:
        model, _metrics = util.get_cv_logloss(model, X, y)

    print('saving model')
    if ver == 1:
        saving_path = util.cnn_lstm_ver1 + '_' + col
    else:
        saving_path = util.cnn_lstm_ver0 + '_' + col
    save_model(model, saving_path)
Example 23
    def save(self, dir=None, file_name=None):
        assert os.path.exists(dir), 'Directory does not exist'

        if file_name is None:
            file_name = self.name

        cfg = self.to_json()

        if self.is_built():
            cfg['weights'] = {
                'fm': os.path.join(dir, file_name + '_fm.h5'),
                'rm': os.path.join(dir, file_name + '_rm.h5')
            }

            k_models.save_model(getattr(self._fm, '_model'), cfg['weights']['fm'])
            k_models.save_model(getattr(self._rm, '_model'), cfg['weights']['rm'])

        save_json(cfg, os.path.join(dir, file_name+'.json'))
Example 24
def test_functional_model_saving():
    model, x = _get_sample_model_and_input()
    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    new_model_disk = load_model(fname)
    os.remove(fname)

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(filename=fname)
        save_model(model, gcs_filepath)
        file_io_proxy.assert_exists(gcs_filepath)
        new_model_gcs = load_model(gcs_filepath)
        file_io_proxy.delete_file(gcs_filepath)  # cleanup

    for new_model in [new_model_disk, new_model_gcs]:
        new_out = new_model.predict(x)
        assert_allclose(out, new_out, atol=1e-05)
Example 25
    def save_model(self,
                   name="trained_1",
                   basepath="../data/models/",
                   save_structure=False):
        # if not name.endswith(".h5"):
        #     name += ".h5"
        print(f"Saving model to: '{basepath}{name}'...")

        if not os.path.exists(basepath):
            os.makedirs(basepath)

        if save_structure:
            model_json = self.model.to_json()
            with open(f"{basepath}{name}.json", "w") as fp:
                fp.write(model_json)

        save_model(self.model, f"{basepath}{name}")
        print("Saving complete.")
Example 26
def test_saving_model_with_long_weights_names():
    x = Input(shape=(2,), name='nested_model_input')
    f = x
    for i in range(4):
        f = Dense(2, name='nested_model_dense_%d' % (i,))(f)
    f = Dense(2, name='nested_model_dense_4', trainable=False)(f)
    # This layer name will make the `weights_name`
    # HDF5 attribute blow out of proportion.
    f = Dense(2, name='nested_model_output' + ('x' * (2**15)))(f)
    nested_model = Model(inputs=[x], outputs=[f], name='nested_model')

    x = Input(shape=(2,), name='outer_model_input')
    f = nested_model(x)
    f = Dense(2, name='outer_model_output')(f)

    model = Model(inputs=[x], outputs=[f])

    model.compile(loss='mse', optimizer='adam', metrics=['acc'])

    x = np.random.random((1, 2))
    y = np.random.random((1, 2))
    model.train_on_batch(x, y)

    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)

    # Check that the HDF5 file contains a chunked array
    # of weight names.
    with h5py.File(fname, 'r') as h5file:
        attrs = [attr for attr in h5file['model_weights']['nested_model'].attrs
                 if attr.startswith('weight_names')]
        n_weight_names_arrays = len(attrs)

    os.remove(fname)

    # The chunking of the weight names array should have happened.
    assert n_weight_names_arrays > 0

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 27
def main(model_name, adv_model_names, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_mnist_flags()

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST test data
    X_train, Y_train, X_test, Y_test = data_mnist()

    data_gen = data_gen_mnist(X_train)

    x = K.placeholder(shape=(None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS,
                             FLAGS.NUM_CHANNELS))

    y = K.placeholder(shape=(FLAGS.BATCH_SIZE, FLAGS.NUM_CLASSES))

    eps = args.eps

    # if src_models is not None, we train on adversarial examples that come
    # from multiple models
    adv_models = [None] * len(adv_model_names)
    for i in range(len(adv_model_names)):
        adv_models[i] = load_model(adv_model_names[i])

    model = model_mnist(type=model_type)

    x_advs = [None] * (len(adv_models) + 1)

    for i, m in enumerate(adv_models + [model]):
        logits = m(x)
        grad = gen_grad(x, logits, y, loss='training')
        x_advs[i] = symbolic_fgs(x, grad, eps=eps)

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen, x_advs=x_advs)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
Example 28
def train(model, model_label, training_data, batch_size=256, epochs=10):
    (x_train, y_train), (x_test, y_test), mapping, nb_classes = training_data

    # convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    STAMP = model_label
    print('Training model {}'.format(STAMP))
    logs_path = './logs/{}'.format(STAMP)

    bst_model_path = './checkpoints/' + STAMP + '.h5'
    early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                   patience=3)
    model_checkpoint = keras.callbacks.ModelCheckpoint(bst_model_path,
                                                       save_best_only=True,
                                                       save_weights_only=True,
                                                       verbose=1)
    tensor_board = keras.callbacks.TensorBoard(log_dir=logs_path,
                                               histogram_freq=0,
                                               write_graph=True,
                                               write_images=False)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                  patience=1)

    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=1,
        shuffle=True,
        validation_data=(x_test, y_test),
        callbacks=[early_stopping, model_checkpoint, tensor_board, reduce_lr])

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    # Offload model to file
    model_yaml = model.to_yaml()
    with open("bin/" + STAMP + ".yaml", "w") as yaml_file:
        yaml_file.write(model_yaml)
    save_model(model, 'bin/' + STAMP + 'model.h5')
Example 29
    def on_epoch_end(self, epoch, logs={}):
        #y_pred = self.model.predict(self.model.validation_data[0])
        # there 7578 frames in test set
        model_has_validation_data = hasattr(self.model, 'validation_data')
        print(model_has_validation_data)
        if model_has_validation_data:
            test_mean_error = predictSequence(epoch, self.model, 5000,
                                              self.model.validation_data[0],
                                              self.model.validation_data[1])
            self.test_mean_errors.append(test_mean_error)

        # Save the model at every epoch end
        print("Saving trained model...")
        model_prefix = 'CNN_LSTM_scratch_v3'
        model_path = "../trained_models/" + model_prefix + ".h5"
        save_model(
            self.model, model_path, overwrite=True
        )  # saves weights, network topology and optimizer state (if any)
        return
Example 30
def main():
    X_train_orig, Y_train_orig, _, _, classes = load_dataset()

    # Normalise image vectors.
    #X_train = preprocess_input(X_train_orig)
    X_train = X_train_orig / 255
    # Convert training and test labels to one hot matrices.
    Y_train = convert_to_one_hot(Y_train_orig, 6).T

    model = ResNet50(input_shape=(64, 64, 3), classes=len(classes))
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    model.fit(x=X_train, y=Y_train, batch_size=32, epochs=20)
    save_model(model, "resnets_signs_model.h5")

    #model.summary()
    plot_model(model, to_file="sign_model.png")
    SVG(model_to_dot(model).create(prog="dot", format="svg"))
Example 31
    def train(self, lr=1e-2, epochs=1):

        remove(self.log_path)
        model = self.build_model(lr)
        model.summary()

        X_train, X_test, y_train, y_test = self.get_data()
        model.fit(x=X_train,
                  y=y_train,
                  batch_size=self.batch_size,
                  epochs=epochs,
                  verbose=1,
                  callbacks=self.get_callbacks(),
                  validation_data=(X_test, y_test),
                  shuffle=True,
                  initial_epoch=0,
                  steps_per_epoch=None, validation_steps=None)

        save_model(model, self.last_model_path)
Example 32
def train(model, training_data, callback=True, batch_size=256, epochs=10):
    print("starting train funct")
    (x_train, y_train), (x_test, y_test), mapping, nb_classes = training_data

    # convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    y_train = np.squeeze(y_train, axis=1)  # make it from 3d to 2d
    y_test = np.squeeze(y_test, axis=1)  # make it from 3d to 2d

    if callback:
        # Callback for analysis in TensorBoard
        tbCallBack = keras.callbacks.TensorBoard(
            log_dir='./OverfitByClassGraph',
            histogram_freq=0,
            write_graph=True,
            write_images=True)

    print("Beginning training")

    print(y_train.shape)
    print(y_test.shape)
    print("______________")
    print(x_train.shape)
    print(x_test.shape)

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test),
              callbacks=[tbCallBack] if callback else None)

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    # Offload model to file
    model_yaml = model.to_yaml()
    with open("bin/overfit_byclass_model.yaml", "w") as yaml_file:
        yaml_file.write(model_yaml)
    save_model(model, 'bin/overfit_byclass_model.h5')
Example 33
def train_best_conv_net(best_combo, n, train_labels, test_meta, test_labels,
                        sample_scores, out_path):
    try:
        print("Train Conv Net")
        train_data = np.load(join(out_path, "best_train_data.npy"))
        test_data = np.load(join(out_path, "best_test_data.npy"))
        environ["CUDA_VISIBLE_DEVICES"] = "{0:d}".format(0)
        session = K.tf.Session(
            config=K.tf.ConfigProto(allow_soft_placement=False,
                                    gpu_options=K.tf.GPUOptions(
                                        allow_growth=True),
                                    log_device_placement=False))
        K.set_session(session)
        hail_conv_net_model = hail_conv_net(**best_combo)
        hail_conv_net_model.fit(train_data,
                                train_labels,
                                batch_size=best_combo["batch_size"],
                                epochs=best_combo["num_epochs"],
                                verbose=2)
        print("Scoring Conv Net")
        test_preds = hail_conv_net_model.predict(test_data).ravel()
        test_pred_frame = test_meta.copy(deep=True)
        test_pred_frame["conv_net"] = test_preds
        test_pred_frame["label"] = test_labels
        test_pred_frame.to_csv(join(
            out_path, "predictions_conv_net_sample_{0:03d}.csv".format(n)),
                               index_label="Index")
        sample_scores.loc[n,
                          "Brier Score"] = brier_score(test_labels, test_preds)
        sample_scores.loc[n, "Brier Score Climo"] = brier_score(
            test_labels, test_labels.mean())
        sample_scores.loc[n, "Brier Skill Score"] = brier_skill_score(
            test_labels, test_preds)
        sample_scores.loc[n, "AUC"] = roc_auc_score(test_labels, test_preds)
        save_model(hail_conv_net_model,
                   join(out_path, "hail_conv_net_sample_{0:03d}.h5".format(n)))
        session.close()
        del session
        del hail_conv_net_model
        return sample_scores
    except Exception as e:
        print(traceback.format_exc())
        raise e
Example 34
def train(args):
    # early_stopping = EarlyStopping(monitor='val_loss', patience=100)
    save_base_model = SaveBaseModel(
        args.check_point + '/' +
        'tencent_speech_epoch.weights.{epoch:03d}-{val_loss:.2f}.hdf5',
        base_model)
    #     model_checkpoint = ModelCheckpoint(args.check_point + '/' + 'deep_speech2_epoch.{epoch:03d}-{val_loss:.2f}.hdf5',
    #                                        save_best_only=True, save_weights_only=True)
    history = model.fit(
        x=[train_inputs, train_input_length, train_y_true, train_label_length],
        y=np.ones(train_y_true.shape[0]),
        validation_data=([
            dev_inputs, dev_input_length, dev_y_true, dev_label_length
        ], np.ones(dev_y_true.shape[0])),
        epochs=args.nb_epoch,
        batch_size=args.batch_size,
        shuffle=False,
        callbacks=[save_base_model])
    save_model(base_model, './tencent_deep_speech.model')
Example 35
def train(model, data, path=None):

    simple_dataset = data

    Y = simple_dataset[['bias', 'not_bias']]
    X = simple_dataset.drop(columns=['bias', 'not_bias'], axis=1)

    print(X.keys())
    print(Y.head())

    x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

    batch_size = 30

    model.fit(x=x_train, y=y_train, batch_size=batch_size, epochs=50)

    print(model.evaluate(x=x_test, y=y_test, batch_size=batch_size))
    if path is not None:
        save_model(model, filepath=path)
Example 36
    def fit(self):
        """Fit the LSTM model, resuming from a checkpoint when one exists.

        :return: a fitted object of class `keras.models.Sequential`.
        """
        if os.path.exists(self.model_):
            log.info('Fully-trained model found.')
            self.model = load_model(self.model_)

            return self.model

        elif os.path.exists(self.ckpt_dir):
            log.info('LSTM-model checkpoint found.')
            self.model = load_model(self.ckpt_dir)

            self.model.fit(self.X,
                           self.y,
                           batch_size=256,
                           epochs=20,
                           callbacks=[self.checkpointer, self.earlystopping])

            return self.model

        else:
            self.model.compile(loss='categorical_crossentropy',
                               optimizer=self.optimizer)
            log.info("Commencing model fitting. Stand by ...")
            time.sleep(0.5)

            # for i in range(1, 30):
            # print('Iteration: ', i)
            self.model.fit(self.X,
                           self.y,
                           batch_size=256,
                           epochs=20,
                           callbacks=[self.checkpointer, self.earlystopping])
            log.info("LSTM-model successfully fitted.")
            save_model(self.model, filepath=self.model_)
            log.info("LSTM-model dumped at 'model'.")

            return self.model
Example 37
def cnnTest():
	fd = open("train_log.txt","w")
	# Build the model: input -> CONV(ReLU) -> CONV(ReLU) -> FC(softmax)
	model = km.Sequential()
	# input  (None, 28, 28, 1)
	# output (None, 28, 28, 32)
	model.add(kl.Conv2D(input_shape=(28, 28, 1), filters=32,
	                    kernel_size=(3, 3), strides=1,
	                    padding='same'))    # zero-padding
	model.add(kl.Activation('relu'))
	# input  (None, 28, 28, 32)
	# output (None, 26, 26, 32)
	model.add(kl.Conv2D(filters=32,
	                    kernel_size=(3, 3), strides=1))
	model.add(kl.Activation('relu'))
	# input  (None, 26, 26, 32)
	# output (None, 13, 13, 32)
	model.add(kl.MaxPooling2D(pool_size=(2,2)))
	model.add(kl.Dropout(0.25))
	# output (None, 13*13*32)
	model.add(kl.Flatten())
	model.add(kl.Dense(128,activation='relu'))
	model.add(kl.Dropout(0.5))
	model.add(kl.Dense(10,activation='softmax'))

	# training configuration
	model.compile(loss='categorical_crossentropy',
	               optimizer='adam', metrics=['accuracy'])

	# plot the model architecture
	ku.plot_model(model, 'model.png')

	# training loop (maxEpoch epochs, batches of 10000 samples)
	for epoch in range(maxEpoch):
		#trLoss = model.train_on_batch(trDataList,trLabelList)
		model.fit(trDataList,trLabelList,batch_size=10000,epochs=1)
		score = model.evaluate(trDataList,trLabelList,verbose=0)
		fd.write(("Epoch %d/%d, error rate : %f\n")%(epoch+1,maxEpoch,100-score[1]*100))

	fd.close()

	km.save_model(model,'best_param.h5')
Example 38
    def train(self):
        epochs = self.max_epochs - self.epoch

        # logger.info("model saved: %s.", args.checkpoint_path)
        # callbacks
        checkpoint_path = tempfile.mktemp()
        callbacks = [
            ModelCheckpoint(checkpoint_path, verbose=1, save_best_only=False),
            LoggerCallback(self)
        ]

        save_model(self.model, checkpoint_path)

        # training start
        num_training_batches = (len(self.training) - 1) // (self.batch_size *
                                                            self.seq_len)
        num_validation_batches = (len(self.validation) -
                                  1) // (self.batch_size * self.seq_len)
        self.model.reset_states()
        try:
            self.model.fit_generator(
                initial_epoch=self.epoch,
                generator=self._batch_generator(self.training,
                                                self.batch_size,
                                                self.seq_len,
                                                one_hot_labels=True),
                steps_per_epoch=num_training_batches,
                validation_data=self._batch_generator(self.validation,
                                                      self.batch_size,
                                                      self.seq_len,
                                                      one_hot_labels=True),
                validation_steps=num_validation_batches,
                epochs=epochs,
                callbacks=callbacks)
        except KeyboardInterrupt as interrupted:
            # Restore the last saved checkpoint here
            self.model = load_model(checkpoint_path)
            if self.epoch == 0:
                print(
                    'This model has not finished a complete epoch, weights will be saved without configuration.'
                )
            raise interrupted
Example 39
def test_saving_model_with_long_layer_names():
    # This layer name will make the `layer_names` HDF5 attribute blow
    # out of proportion. Note that it fits into the internal HDF5
    # attribute memory limit on its own, but because h5py converts
    # the list of layer names into a numpy array, which uses the same
    # amount of memory for every item, it increases the memory
    # requirements substantially.
    x = Input(shape=(2, ), name='input_' + ('x' * (2**15)))
    f = x
    for i in range(4):
        f = Dense(2, name='dense_%d' % (i, ))(f)

    model = Model(inputs=[x], outputs=[f])

    model.compile(loss='mse', optimizer='adam', metrics=['acc'])

    x = np.random.random((1, 2))
    y = np.random.random((1, 2))
    model.train_on_batch(x, y)

    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)

    # Check that the HDF5 file contains a chunked array
    # of layer names.
    with h5py.File(fname, 'r') as h5file:
        n_layer_names_arrays = len([
            attr for attr in h5file['model_weights'].attrs
            if attr.startswith('layer_names')
        ])

    os.remove(fname)

    # The chunking of the layer names array should have happened.
    assert n_layer_names_arrays > 0

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 40
    def make_checkpoint(self, gen, steps, rewards, t_start, p):
        if self.checkpoint_every and (gen is None
                                      or gen % self.checkpoint_every == 0):

            # for i in range(n_tests):
            #     tup = self.fitnessfun(self.env, self.model)
            #     test_reward += tup[0]
            #     wins += tup[2]

            # Test current model
            n_tests = 30
            inputs = [{
                'env': env,
                'model': model
            } for env, model in zip([self.env] * n_tests, [self.model] *
                                    n_tests)]
            output = p.map(self.evaluate_fitness, inputs)
            test_rewards = [d['reward'] for d in output]
            test_rewards.extend([d['reward_antithetic'] for d in output])
            wins = [d['win'] for d in output]
            wins.extend([d['win_antithetic'] for d in output])
            test_reward = np.mean(test_rewards)
            win_rate = np.sum(wins) / len(wins)

            # Store results
            self.results['generations'].append(gen)
            self.results['steps'].append(steps)
            self.results['test_rewards'].append(test_reward)
            self.results['time'].append(time.time() - t_start)
            self.results['weight_norm'].append(
                self.get_weight_norms(self.weights))
            self.results['mean_pop_rewards'].append(np.mean(rewards))
            self.results['win_rate'].append(win_rate)
            self.print_progress(gen)

            # Save model
            save_model(self.model, os.path.join(self.save_dir, 'model.h5'))
            with open(os.path.join(self.save_dir, 'model.json'), 'w') as f:
                f.write(self.model.to_json())
            # Save results
            with open(os.path.join(self.save_dir, 'results.pkl'), 'wb') as f:
                pickle.dump(self.results, f, protocol=pickle.HIGHEST_PROTOCOL)
Example 41
    def train_KerasBinaryClassifier(self, X_train, y_train):

        # Use Tenserflow backend
        sess = tf.Session()
        K.set_session(sess)

        def model():
            model = models.Sequential([
                layers.Dense(128,
                             input_dim=X_train.shape[1],
                             activation='relu'),
                layers.Dropout(0.5),
                layers.Dense(1, activation='sigmoid')
            ])
            model.compile(loss='binary_crossentropy',
                          optimizer='rmsprop',
                          metrics=['accuracy'])
            return model

        early_stopping = callbacks.EarlyStopping(monitor='val_loss',
                                                 patience=1,
                                                 verbose=0,
                                                 mode='auto')

        pipe = pipeline.Pipeline([('rescale', preprocessing.StandardScaler()),
                                  ('nn',
                                   KerasClassifier(build_fn=model,
                                                   nb_epoch=10,
                                                   batch_size=128,
                                                   validation_split=0.2,
                                                   callbacks=[early_stopping]))
                                  ])

        pipe.fit(X_train, y_train)

        model_step = pipe.steps.pop(-1)[1]
        joblib.dump(pipe, os.path.join(self.directory, 'pipeline.pkl'))
        print("Trained Model is Saved at relative path inside PROJECT_DIR ",
              self.directory)
        models.save_model(model_step.model,
                          os.path.join(self.directory, 'model.h5'))
        return
Example 42
    def training(self):
        stepsPerEpochs = self.train_generator.samples // self.batchSize
        #stepsPerEpochs = self.validation_generator.samples//self.batchSize
        validationSteps = self.validation_generator.samples // self.batchSize
        #with tf.device('/gpu:1'):

        # =============================================================================
        #         config = tf.ConfigProto( device_count = {'GPU': 1 , 'CPU': 56} )
        #         sess = tf.Session(config=config)
        #         keras.backend.set_session(sess)
        # =============================================================================
        self.hist = self.model.fit_generator(
            self.train_generator,
            steps_per_epoch=stepsPerEpochs,
            epochs=self.epochs,
            validation_data=self.validation_generator,
            validation_steps=validationSteps,
            verbose=1,
            callbacks=[self.reduceLR, self.EarlyCheckPt, self.ModelCkPt]
            #AltModelCheckpoint(self.path+'Models/OCR_Epochs.h5',self.baseModel)]
        )
        #except Exception as e:
        #print("Got issues : ", e)

        # save the model after the final epoch
        save_model(self.model,
                   self.path + 'Models/Seperate_Models/Final_Alphabets_v3.h5')
        self.model.set_weights(self.model.get_weights())
        self.model.save(filepath=self.path +
                        'Models/Seperate_Models/Final_Alphabets_v3.h5')
        N = np.arange(0, self.epochs)
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(N, self.hist.history["loss"], label="train_loss")
        plt.plot(N, self.hist.history["val_loss"], label="val_loss")
        plt.plot(N, self.hist.history["acc"], label="train_acc")
        plt.plot(N, self.hist.history["val_acc"], label="val_acc")
        plt.title("Training Loss and Accuracy (Simple NN)")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss/Accuracy")
        plt.legend()
        plt.savefig(self.path + 'OutputAlphabets_v3.png')
Example 43
def main():
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"

    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get GTSRB training and test data
    x_train, y_train, _, _, x_test, y_test = load_dataset_GTSRB(
        n_channel=N_CHANNEL, train_file_name=TRAIN_FILE_NAME)

    # Convert to one-hot encoding
    y_train = keras.utils.to_categorical(y_train, NUM_LABELS)
    y_test = keras.utils.to_categorical(y_test, NUM_LABELS)

    x = K.placeholder(shape=(None, HEIGHT, WIDTH, N_CHANNEL))
    y = K.placeholder(shape=(BATCH_SIZE, NUM_LABELS))

    eps = args.eps
    x_advs = [None]

    model = build_mltscl()

    if args.iter == 0:
        logits = model(x)
        grad = gen_grad(x, logits, y, loss='training')
        x_advs = symbolic_fgs(x, grad, eps=eps)
    elif args.iter == 1:
        x_advs = symb_iter_fgs(model, x, y, steps=40, alpha=0.01, eps=args.eps)

    # Train an MNIST model
    tf_train(x, y, model, x_train, y_train, x_advs=x_advs, benign=args.ben)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, x_test, y_test)
    print(test_error)

    # Specify model name
    model_name = './tmp/multiscale_adv'
    save_model(model, model_name)
    json_string = model.to_json()
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
Example 44
def train(model, training_data, batch_size=256, epochs=10):
    (x_train, y_train), (x_test, y_test), mapping, nb_classes = training_data

    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    tb_callback = keras.callbacks.TensorBoard(log_dir='./model/Graph',
        histogram_freq=0, write_graph=True, write_images=True)

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
        validation_data=(x_test, y_test), callbacks=[tb_callback])

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])

    model_yaml = model.to_yaml()
    with open('model/model.yaml', 'w') as yaml_file:
        yaml_file.write(model_yaml)
    save_model(model, 'model/model.h5')
Example 45
def train_model(Xi, Yi, idx, init_val):

    # Create the LSTM model
    lstm_model = create_lstm_model(input_shape=(1, Xi.shape[1], Xi.shape[2]))

    # Train the model for <n_epochs>
    for i in range(N_TRAINING_EPOCHS):
        lstm_model.fit(Xi, Yi, epochs=1, batch_size=1, verbose=0, shuffle=False)
        lstm_model.reset_states()

    mse = evaluate_model(lstm_model, Xi, Yi, init_val)

    print(idx, mse)

    # Save performance score and trained model:

    with open("./model_performance/" + DATASET + "/" + str(idx) + ".pkl", "wb") as lf:
        pickle.dump(mse, lf)

    save_model(lstm_model, "./models/" + DATASET + "/" + str(idx) + ".kmodel")
Example 46
    def save_model_to_mongo(self, model: Any, trained_from: date = None, trained_upto: date = None):
        fs = connect_grid()

        if self.keras:
            with NamedTemporaryFile(suffix='.hdf5', delete=True) as ntf:
                save_model(model, ntf.name, overwrite=True)
                with BytesIO(Binary(ntf.read())) as f:
                    objectId = fs.put(f, filename=self.model_name, chunk_size=2097152)

        else:
            with BytesIO(Binary(dumps(model))) as f:
                objectId = fs.put(f, filename=self.model_name, chunk_size=2097152)

        PythonModel(
            grid_fileid=objectId,
            model_name=self.model_name,
            symbol=self.symbol,
            trained_from=trained_from,
            trained_upto=trained_upto,
        ).save()
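A counterpart that reverses the Keras branch above might look like this sketch; `connect_grid` is the same helper used by the saver, while `load_model_from_mongo` and its argument are illustrative names rather than part of the original class:

from tempfile import NamedTemporaryFile

from keras.models import load_model

def load_model_from_mongo(object_id):
    # Stream the stored HDF5 bytes back out of GridFS into a temporary
    # file that keras.models.load_model can open by name.
    fs = connect_grid()
    with NamedTemporaryFile(suffix='.hdf5', delete=True) as ntf:
        ntf.write(fs.get(object_id).read())
        ntf.flush()
        return load_model(ntf.name)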
Example 47
def test_functional_model_saving():
    input = Input(shape=(3,))
    x = Dense(2)(input)
    output = Dense(3)(x)

    model = Model(input, output)
    model.compile(loss=objectives.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    model = load_model(fname)
    out2 = model.predict(x)
    assert_allclose(out, out2)
Example 48
    def continue_train(self, lr=1e-4, epochs=1, resume_model='last'):

        if resume_model == 'last':
            model = load_model(self.last_model_path)
        else:
            model = load_model(self.best_model_path)
        model.summary()

        X_train, X_test, y_train, y_test = self.get_data()
        model.fit(x=X_train,
                  y=y_train,
                  batch_size=self.batch_size,
                  epochs=epochs,
                  verbose=1,
                  callbacks=self.get_callbacks(),
                  validation_data=(X_test, y_test),
                  shuffle=True,
                  # initial_epoch=start_epoch,
                  steps_per_epoch=None, validation_steps=None)
        save_model(model, self.last_model_path)
Example 49
    def NN_model_train(self, trainX, trainY, testX, testY, model_save_path):
        """
        :param trainX: training data set
        :param trainY: expected values for the training data
        :param testX: test data set
        :param testY: expected values for the test data
        :param model_save_path: h5 file in which to store the trained model
        :return: model after training
        """
        input_dim = trainX[0].shape[1]
        output_dim = trainY.shape[1]
        # print predefined parameters of current model:
        model = Sequential()
        # apply an LSTM layer with x-dim output and y-dim input;
        # use the dropout parameter to avoid overfitting
        model.add(LSTM(output_dim=self.lstm_output_dim,
                       input_dim=input_dim,
                       activation=self.activation_lstm,
                       dropout_U=self.drop_out,
                       return_sequences=True))
        for i in range(self.lstm_layer - 2):
            model.add(LSTM(output_dim=self.lstm_output_dim,
                           activation=self.activation_lstm,
                           dropout_U=self.drop_out,
                           return_sequences=True))
        # return_sequences should be False to avoid a dimension error
        # when concatenating with the dense layers
        model.add(LSTM(output_dim=self.lstm_output_dim,
                       activation=self.activation_lstm,
                       dropout_U=self.drop_out))
        # apply a fully connected NN to accept the output from the LSTM layer
        for i in range(self.dense_layer - 1):
            model.add(Dense(output_dim=self.lstm_output_dim,
                            activation=self.activation_dense))
            model.add(Dropout(self.drop_out))
        model.add(Dense(output_dim=output_dim, activation=self.activation_last))
        # configure the learning process
        model.compile(loss=self.loss, optimizer=self.optimizer, metrics=['accuracy'])
        # train the model for a fixed number of epochs
        model.fit(x=trainX, y=trainY, nb_epoch=self.nb_epoch,
                  batch_size=self.batch_size, validation_data=(testX, testY))
        plot_model(model, to_file='model.png')
        score = model.evaluate(trainX, trainY, self.batch_size)
        print("Model evaluation: {}".format(score))  # [0.29132906793909186, 0.91639871695672837]
        # store the full model (architecture + weights) in an HDF5 file
        save_model(model, model_save_path)
Example 50
def test_saving_custom_activation_function():
    x = Input(shape=(3,))
    output = Dense(3, activation=K.cos)(x)

    model = Model(x, output)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname, custom_objects={'cos': K.cos})
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
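Passing `custom_objects` to every `load_model` call can also be avoided by registering the function in Keras' global custom-object registry, as in this sketch (the explicit argument used above keeps the scope tighter):

from keras.utils.generic_utils import get_custom_objects

# Register the activation once; subsequent load_model calls resolve
# 'cos' without an explicit custom_objects mapping.
get_custom_objects().update({'cos': K.cos})
model = load_model(fname)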
Example 51
def test_loop_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])

    x = np.random.random((1, 3))
    y = np.random.random((1, 2))
    _, fname = tempfile.mkstemp('.h5')

    for _ in range(3):
        model.train_on_batch(x, y)
        save_model(model, fname, overwrite=True)
        out = model.predict(x)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 52
def test_loop_model_saving():
    model = Sequential()
    model.add(Dense(2, input_shape=(3, )))
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])

    x = np.random.random((1, 3))
    y = np.random.random((1, 2))
    _, fname = tempfile.mkstemp('.h5')

    for _ in range(3):
        model.train_on_batch(x, y)
        save_model(model, fname, overwrite=True)
        out = model.predict(x)

    new_model = load_model(fname)
    os.remove(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 53
def test_model_saving_to_pre_created_h5py_file():
    model, x = _get_sample_model_and_input()

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    with h5py.File(fname, mode='r+') as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test non-default options in h5
    with h5py.File('does not matter', driver='core',
                   backing_store=False) as h5file:
        save_model(model, h5file)
        loaded_model = load_model(h5file)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    with h5py.File(fname, mode='r+') as h5file:
        g = h5file.create_group('model')
        save_model(model, g)
        loaded_model = load_model(g)
        out2 = loaded_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 54
def test_saving_overwrite_option():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)
    model.set_weights(new_weights)

    with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
        ask.return_value = False
        save_model(model, fname, overwrite=False)
        ask.assert_called_once()
        new_model = load_model(fname)
        for w, org_w in zip(new_model.get_weights(), org_weights):
            assert_allclose(w, org_w)

        ask.return_value = True
        save_model(model, fname, overwrite=False)
        assert ask.call_count == 2
        new_model = load_model(fname)
        for w, new_w in zip(new_model.get_weights(), new_weights):
            assert_allclose(w, new_w)

    os.remove(fname)
Example 55
def test_saving_overwrite_option_gcs():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,)))
    org_weights = model.get_weights()
    new_weights = [np.random.random(w.shape) for w in org_weights]

    with tf_file_io_proxy('keras.engine.saving.tf_file_io') as file_io_proxy:
        gcs_filepath = file_io_proxy.get_filepath(
            filename='test_saving_overwrite_option_gcs.h5')
        # we should not use same filename in several tests to allow for parallel
        # execution
        save_model(model, gcs_filepath)
        model.set_weights(new_weights)

        with patch('keras.engine.saving.ask_to_proceed_with_overwrite') as ask:
            ask.return_value = False
            save_model(model, gcs_filepath, overwrite=False)
            ask.assert_called_once()
            new_model = load_model(gcs_filepath)
            for w, org_w in zip(new_model.get_weights(), org_weights):
                assert_allclose(w, org_w)

            ask.return_value = True
            save_model(model, gcs_filepath, overwrite=False)
            assert ask.call_count == 2
            new_model = load_model(gcs_filepath)
            for w, new_w in zip(new_model.get_weights(), new_weights):
                assert_allclose(w, new_w)

        file_io_proxy.delete_file(gcs_filepath)  # cleanup
Example 56
def test_saving_model_with_long_layer_names():
    # This layer name will make the `layer_names` HDF5 attribute blow
    # out of proportion. Note that it fits into the internal HDF5
    # attribute memory limit on its own, but because h5py converts
    # the list of layer names into a numpy array, which uses the same
    # amount of memory for every item, it increases the memory
    # requirements substantially.
    x = Input(shape=(2,), name='input_' + ('x' * (2**15)))
    f = x
    for i in range(4):
        f = Dense(2, name='dense_%d' % (i,))(f)

    model = Model(inputs=[x], outputs=[f])

    model.compile(loss='mse', optimizer='adam', metrics=['acc'])

    x = np.random.random((1, 2))
    y = np.random.random((1, 2))
    model.train_on_batch(x, y)

    out = model.predict(x)

    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)

    # Check that the HDF5 file contains a chunked array
    # of layer names.
    with h5py.File(fname, 'r') as h5file:
        n_layer_names_arrays = len([attr for attr in h5file['model_weights'].attrs
                                    if attr.startswith('layer_names')])

    os.remove(fname)

    # The chunking of the layer names array should have happened.
    assert n_layer_names_arrays > 0

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)
Example 57
def test_functional_model_saving():
    inputs = Input(shape=(3,))
    x = Dense(2)(inputs)
    outputs = Dense(3)(x)

    model = Model(inputs, outputs)
    model.compile(loss=losses.MSE,
                  optimizer=optimizers.RMSprop(lr=0.0001),
                  metrics=[metrics.categorical_accuracy])
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    _, fname = tempfile.mkstemp('.h5')
    save_model(model, fname)

    model = load_model(fname)
    os.remove(fname)

    out2 = model.predict(x)
    assert_allclose(out, out2, atol=1e-05)