Example #1
def test_image_data_generator_training():
    np.random.seed(1337)
    img_gen = ImageDataGenerator(rescale=1.)  # Dummy ImageDataGenerator
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit_generator(img_gen.flow(x_train, y_train, batch_size=16),
                                  epochs=10,
                                  validation_data=img_gen.flow(x_test, y_test,
                                                               batch_size=16),
                                  verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    model.evaluate_generator(img_gen.flow(x_train, y_train, batch_size=16))
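The `get_test_data` helper above comes from Keras' internal test utilities and is not shown in the snippet; a minimal stand-in sketch, assuming uniformly random features and integer class labels (the signature mirrors the call above, the body is an assumption):

def get_test_data(num_train, num_test, input_shape, classification=True,
                  num_classes=2):
    # Random arrays shaped like the real helper's output; illustration only.
    n = num_train + num_test
    x = np.random.random((n,) + input_shape)
    if classification:
        y = np.random.randint(0, num_classes, size=n)
    else:
        y = np.random.random((n,))
    return (x[:num_train], y[:num_train]), (x[num_train:], y[num_train:])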
Example #2
def test_multiprocessing_evaluate_error():
    batch_size = 10
    good_batches = 3

    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (50, 2)),
                   np.random.randint(batch_size, 2, 50))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    with pytest.raises(StopIteration):
        model.evaluate_generator(
            custom_generator(), good_batches + 1, 1,
            workers=4, use_multiprocessing=True,
        )

    with pytest.raises(StopIteration):
        model.evaluate_generator(
            custom_generator(), good_batches + 1, 1,
            use_multiprocessing=False,
        )
Example #3
def test_multiprocessing_evaluate_error():

    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(Exception):
        model.evaluate_generator(
            myGenerator(), samples, 1,
            nb_worker=4, pickle_safe=True,
        )

    with pytest.raises(Exception):
        model.evaluate_generator(
            myGenerator(), samples, 1,
            pickle_safe=False,
        )
Example #4
def try_params(n_iterations, params, data=None, datamode='memory'):

    print("iterations:", n_iterations)
    print_params(params)

    batchsize = 100
    if datamode == 'memory':
        X_train, Y_train = data['train']
        X_valid, Y_valid = data['valid']
        inputshape = X_train.shape[1:]
    else:
        train_generator = data['train']['gen_func'](batchsize, data['train']['path'])
        valid_generator = data['valid']['gen_func'](batchsize, data['valid']['path'])
        train_epoch_step = data['train']['n_sample'] // batchsize
        valid_epoch_step = data['valid']['n_sample'] // batchsize
        inputshape = next(data['train']['gen_func'](batchsize, data['train']['path']))[0].shape[1:]

    model = Sequential()
    model.add(Conv2D(128, (1, 24), padding='same', input_shape=inputshape, activation='relu'))
    model.add(GlobalMaxPooling2D())

    model.add(Dense(32, activation='relu'))
    model.add(Dropout(params['DROPOUT']))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    optim = Adadelta
    myoptimizer = optim(epsilon=params['DELTA'], rho=params['MOMENT'])
    mylossfunc = 'categorical_crossentropy'
    model.compile(loss=mylossfunc, optimizer=myoptimizer, metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=0)

    if datamode == 'memory':
        model.fit(X_train,
                  Y_train,
                  batch_size=batchsize,
                  epochs=int(round(n_iterations)),
                  validation_data=(X_valid, Y_valid),
                  callbacks=[early_stopping])
        score, acc = model.evaluate(X_valid, Y_valid)
    else:
        model.fit_generator(train_generator,
                            steps_per_epoch=train_epoch_step,
                            epochs=int(round(n_iterations)),
                            validation_data=valid_generator,
                            validation_steps=valid_epoch_step,
                            callbacks=[early_stopping])
        score, acc = model.evaluate_generator(valid_generator, steps=valid_epoch_step)

    return {'loss': score, 'model': (model.to_json(), optim, myoptimizer.get_config(), mylossfunc)}
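`try_params` reads `data` as a nested dict; a hypothetical 'memory'-mode call (the key layout follows the function body, but the array shapes and hyperparameter values are invented for illustration):

import numpy as np

# one-hot 2-class labels to match Dense(2) + categorical_crossentropy
data = {
    'train': (np.random.random((1000, 4, 24, 1)),
              np.eye(2)[np.random.randint(0, 2, 1000)]),
    'valid': (np.random.random((200, 4, 24, 1)),
              np.eye(2)[np.random.randint(0, 2, 200)]),
}
result = try_params(3, {'DROPOUT': 0.3, 'DELTA': 1e-6, 'MOMENT': 0.95},
                    data=data, datamode='memory')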
Example #5
def test_multiprocessing_evaluating():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    def custom_generator():
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             workers=2,
                             use_multiprocessing=True)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False)
    model.evaluate_generator(custom_generator(),
                             steps=5,
                             max_queue_size=10,
                             use_multiprocessing=False,
                             workers=0)
Example #6
def test_multiprocessing_evaluating():

    reached_end = False

    arr_data = np.random.randint(0,256, (500, 200))
    arr_labels = np.random.randint(0, 2, 500)

    def myGenerator():

        batch_size = 32
        n_samples = 500

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(10, input_shape=(200, )))
    model.add(Activation('relu'))
    model.add(Dense(1))
    model.add(Activation('linear'))
    model.compile(loss='mse', optimizer='adadelta')

    model.evaluate_generator(myGenerator(),
                             val_samples=320,
                             max_q_size=10,
                             nb_worker=2,
                             pickle_safe=True)
    model.evaluate_generator(myGenerator(),
                             val_samples=320,
                             max_q_size=10,
                             pickle_safe=False)
    reached_end = True

    assert reached_end
Example #7
def test_sequential_fit_generator_finite_length():
    (X_train, y_train), (X_test, y_test) = _get_test_data(1000,200)

    def data_generator(train, nbatches):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        for i in range(nbatches):
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,), activation='relu'))
    model.add(Dense(nb_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nsamples = (len(X_train) // batch_size) * batch_size
    model.fit_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)

    loss = model.evaluate(X_train, y_train)
    assert(loss < 3.0)

    eval_results = model.evaluate_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)
    assert(eval_results < 3.0)

    predict_results = model.predict_generator(data_generator(True, nsamples//batch_size), nsamples, nb_epoch)
    assert(predict_results.shape == (nsamples, 4))

    # should fail because there are not enough samples
    with pytest.raises(Exception):
        model.fit_generator(data_generator(True, nsamples // batch_size),
                            nsamples + 1, nb_epoch)

    # should fail because the generator raises an exception
    def bad_generator(gen):
        for i in range(20):
            yield next(gen)
        raise Exception("Generator raised an exception")

    with pytest.raises(Exception):
        model.fit_generator(bad_generator(data_generator(True, nsamples // batch_size)),
                            nsamples + 1, nb_epoch)
Example #8
def test_multithreading_evaluate_error():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)
    batch_size = 10
    n_samples = 50
    good_batches = 3

    @threadsafe_generator
    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker threads, consume on main thread:
    #   - All worker threads share the SAME generator
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.evaluate_generator(custom_generator(),
                                 steps=good_batches * WORKERS + 1,
                                 max_queue_size=10,
                                 workers=WORKERS,
                                 use_multiprocessing=False)

    # - Produce data on 1 worker thread, consume on main thread:
    #   - Worker thread is the only thread running the generator
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.evaluate_generator(custom_generator(),
                                 steps=good_batches + 1,
                                 max_queue_size=10,
                                 workers=1,
                                 use_multiprocessing=False)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.evaluate_generator(custom_generator(),
                                 steps=good_batches + 1,
                                 max_queue_size=10,
                                 workers=0,
                                 use_multiprocessing=False)
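The `@threadsafe_generator` decorator used here and in the following examples is not defined in any of the snippets; a minimal sketch of the usual lock-based recipe it refers to:

import threading

class ThreadsafeIterator:
    """Serializes next() calls on a wrapped iterator with a lock."""

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)

def threadsafe_generator(f):
    """Decorator: make a generator function return thread-safe iterators."""
    def g(*args, **kwargs):
        return ThreadsafeIterator(f(*args, **kwargs))
    return g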
Example #9
def test_multithreading_evaluating():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    @threadsafe_generator
    def custom_generator():
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker threads, consume on main thread:
    #   - All worker threads share the SAME generator
    model.evaluate_generator(custom_generator(),
                             steps=STEPS,
                             max_queue_size=10,
                             workers=WORKERS,
                             use_multiprocessing=False)

    # - Produce data on 1 worker thread, consume on main thread:
    #   - Worker thread is the only thread running the generator
    model.evaluate_generator(custom_generator(),
                             steps=STEPS,
                             max_queue_size=10,
                             workers=1,
                             use_multiprocessing=False)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    model.evaluate_generator(custom_generator(),
                             steps=STEPS,
                             max_queue_size=10,
                             workers=0,
                             use_multiprocessing=False)
Example #10
              optimizer='rmsprop',
              metrics=['accuracy'])

print(model.summary())

# ### Learning

epochs = 20

history = model.fit_generator(train_generator,
                              steps_per_epoch=nimages_train // batch_size,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=nimages_validation //
                              batch_size,
                              verbose=2,
                              callbacks=callbacks,
                              use_multiprocessing=True,
                              workers=4)

model.save("dvc-small-cnn.h5")

# ### Inference

print('Evaluating model...')
scores = model.evaluate_generator(test_generator,
                                  steps=nimages_test // batch_size,
                                  use_multiprocessing=True,
                                  workers=4)
print("Test set %s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))
Example #11
        model.add(SimpleRNN(num_classes, activation='sigmoid'))

    model.summary()

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', tp, tn])

    filepath = 'models/fold{}.{}l.{}h.{}-last.weights.best.hdf5'.format(
        i, layers, hidden_units, args.last)
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    history = model.fit_generator(
        train_gen,
        epochs=epochs,
        verbose=1,
        validation_data=val_gen,
        max_queue_size=5,
        # use_multiprocessing=True,
        callbacks=callbacks_list)

    score = model.evaluate_generator(val_gen)

    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
Example #12
a = Cropping2D((3, 3))(a)
a = UpSampling2D((2, 2))(a)
a = Conv2D(1, (1, 1), padding="same", activation="relu")(a)

model = Model(inp0, a)

model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
print(model.summary())

model_json = model.to_json()
json_file = open("mnist_model.json", "w")
json_file.write(model_json)
json_file.close()
model.save_weights("model1.h5")
model.fit_generator(train_generator,
                    steps_per_epoch=nb_train_samples // batch_size,
                    epochs=epochs,
                    validation_data=val_generator,
                    validation_steps=nb_validation_samples // batch_size)

scores = model.evaluate_generator(test_generator,
                                  nb_test_samples // batch_size)
print((scores[1] * 100))

image_file_name = '/Users/olgalavricenko/vtrain.png'
img = cv2.imread(image_file_name)
cv2.imshow('img', img)
cv2.waitKey(0)
Example #13
# In[8]:

STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
STEP_SIZE_VALID = val_generator.n // val_generator.batch_size
STEP_SIZE_TEST = test_generator.n // test_generator.batch_size

classifier.fit_generator(generator=train_generator,
                         steps_per_epoch=STEP_SIZE_TRAIN,
                         validation_data=val_generator,
                         validation_steps=STEP_SIZE_VALID,
                         epochs=40)

# In[11]:

classifier.evaluate_generator(generator=val_generator, steps=STEP_SIZE_TEST)

# In[12]:

pred = classifier.predict_generator(test_generator,
                                    steps=STEP_SIZE_TEST + 1,
                                    verbose=1)

# In[13]:

predicted_class_indices = np.argmax(pred, axis=1)

# In[15]:

pd.DataFrame(predicted_class_indices,
             columns=['emotion'
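The DataFrame call above is truncated in the source. To turn the argmax indices back into label strings, the usual pattern with flow_from_directory generators (a sketch; it assumes `train_generator` exposes the standard `class_indices` mapping) is:

# class_indices maps label -> index; invert it to decode predictions
labels_map = {v: k for k, v in train_generator.class_indices.items()}
predicted_labels = [labels_map[i] for i in predicted_class_indices]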
Example #14
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

nb_epoch = 30
nb_train_samples = 20000
nb_test_samples = 2000

model.fit_generator(train_generator,
                    samples_per_epoch=nb_train_samples,
                    nb_epoch=nb_epoch,
                    validation_data=test_generator,
                    nb_val_samples=nb_test_samples)

model.save_weights('models/basic_cnn_20_epochs.h5')

model.evaluate_generator(test_generator, nb_test_samples)
Example #15
    ])

# import matplotlib.pyplot as plt
# for batch in training_set:
#   for i in range(0,9):
#     plt.subplot(330+1+i)
#     plt.imshow(X_batch[i].reshape(28, 28), cmap=plt.get_cmap('gray'))
#   #images=list(images).reshape(128,128,1)
#   plt.show()
#   print(images,images.shape)

model_class = model.fit_generator(training_set,
                                  steps_per_epoch=32,
                                  epochs=175,
                                  validation_data=validation_set,
                                  validation_steps=32,
                                  callbacks=[checkpoint])

score = model.evaluate_generator(validation_set)
print('test loss', score[0])
print('test accuracy', score[1])

# predict_datagen = ImageDataGenerator(rescale = 1./255)

# predict_set = predict_datagen.flow_from_directory('predict',target_size = (128, 128),
#                                                  batch_size = 82,
#                                                  class_mode = 'categorical')
# X,y = predict_set.next()
# arr = model.predict_classes(X)
# print(arr)
Example #16
# Load label names to use in prediction results
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
    if predict_index == num_predictions:
        break
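One caveat about the loop above: `ImageDataGenerator.flow` shuffles by default, so the rows of predict_gen need not line up index-wise with y_test. A variant that keeps the order stable:

# shuffle=False keeps batches in x_test order, so prediction i
# corresponds to y_test[i]; with the default shuffle=True it may not.
predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size,
                                      shuffle=False),
                                      steps=x_test.shape[0] // batch_size)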
Example #17
model.add(Dropout(0.3))
model.add(Dense(128, activation=activ))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
history = model.fit_generator(bg,
                              epochs=5,
                              steps_per_epoch=1000,
                              validation_data=(val_vects, val_y),
                              verbose=True)

#test_vects = np.array([get_concatenated_embeddings(test_emb) for test_emb in all_embeddings_test])
#test_y = np.array(y_test[:3000])
test_gen = batch_gen(n_batches, all_y_test, all_embeddings_test)
scores = model.evaluate_generator(test_gen, steps=25, verbose=1)
print("Accuracy", scores[1])

import matplotlib.pyplot as plt
batch_size = 30000
test_whole = batch_gen(1, all_y_test, all_embeddings_test)

pred_prob = model.predict_generator(test_whole, steps=1, verbose=1)
pred_y = pred_prob > 0.5
print("F1 score: ", f1_score(all_y_test[:batch_size], pred_y))
print("Confusion_matrix:\n", confusion_matrix(all_y_test[:batch_size], pred_y))
precision, recall, _ = precision_recall_curve(all_y_test[:batch_size],
                                              pred_prob)

plt.figure()
plt.title("Precision Recall Curve")
Example #18
    Flatten(),
    Dense(16, activation="relu"),
    Dense(2, activation="relu")
])
model.compile(optimizer="sgd", loss="mean_squared_error", metrics=["accuracy"])

model.summary()

con_network = model.fit_generator(train_batches,
                                  steps_per_epoch=(5216 / 10),
                                  epochs=5,
                                  validation_data=val_batches,
                                  validation_steps=100,
                                  verbose=2)

accuracy_test = model.evaluate_generator(test_batches, steps=624)

print(f"The test accuracy is: {accuracy_test[1]*100}")

plt.plot(con_network.history["accuracy"])
plt.plot(con_network.history["val_accuracy"])
plt.title("Accuracy of the model")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["Training set", "Validation set"], loc="upper left")
plt.show()

plt.plot(con_network.history["loss"])
plt.plot(con_network.history["val_loss"])
plt.title("Accuracy of the model")
plt.xlabel("Epoch")
Example #19
train_steps = counts(train_dir, lookback=lookback, delay=delay)
print(train_steps)
test_steps = counts(test_dir, lookback=lookback, delay=delay)
print(test_steps)

# Build the model
model = Sequential()
model.add(
    layers.LSTM(32,
                dropout=0.1,
                recurrent_dropout=0.5,
                return_sequences=True,
                input_shape=(None, 1)))

model.add(
    layers.LSTM(64, activation='relu', dropout=0.1, recurrent_dropout=0.5))

model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(lr=1e-4), loss='mae')
history = model.fit_generator(train,
                              steps_per_epoch=train_steps,
                              epochs=100,
                              validation_data=val,
                              validation_steps=val_steps)

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

model.evaluate_generator(test, steps=test_steps)
model.save('predict_co.h5')
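Note that the session created above, after training has finished, has no effect on the model that was already built and fit. In TF1-era Keras the configured session has to be registered before any model is constructed for the memory cap to apply, roughly:

import tensorflow as tf
from keras import backend as K

# must run before building the model for the GPU options to take effect
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
K.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))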
Example #20
submission_df.drop(['filename', 'category'], axis=1, inplace=True)
submission_df.to_csv('S:/submission.csv', index=False)

# In[60]:

submission_df['real'] = submission_df.id.str.split(' ').str[0]

# In[61]:

submission_df

# accuracy print

# In[62]:

score = model.evaluate_generator(validation_generator,
                                 steps=len(validation_generator))
print('Test score:', score[0])
print('Test accuracy:', score[1])

# ### confusion matrix

# In[63]:

from sklearn.metrics import confusion_matrix

# In[64]:

pd.crosstab(submission_df['real'],
            submission_df['label'],
            rownames=['True'],
            colnames=['Predicted'],
Example #21
deep_morph.add(Dropout(0.2))

from keras import optimizers
deep_morph.compile(optimizer='adadelta',
                   loss='binary_crossentropy',
                   metrics=['accuracy', f1, precision, recall])
print(deep_morph.summary())

# Training
deep_morph.fit_generator(train_generator_word_level(train_file),
                         epochs=100,
                         steps_per_epoch=720000)

# Evaluation
print(
    deep_morph.evaluate_generator(train_generator_word_level(train_file),
                                  steps=180000))
print(
    deep_morph.evaluate_generator(train_generator_word_level(test_file),
                                  steps=180000))

deep_morph = Sequential()
# deep_morph.add(Merge([chars, feats]))
deep_morph.add(emb)
deep_morph.add(Dropout(0.2))
# deep_morph.add(GRU(16,return_sequences=True))
deep_morph.add(Bidirectional(GRU(64, activation='relu',
                                 return_sequences=True)))
deep_morph.add(Dropout(0.2))
deep_morph.add(Bidirectional(GRU(64, activation='relu',
                                 return_sequences=True)))
deep_morph.add(Dropout(0.2))
Example #22
    batches = [
        list(range(batch_size * i, min(len(data), batch_size * (i + 1))))
        for i in range(len(data) // batch_size + 1)
    ]
    while True:
        for i in batches:
            # data[i] has shape (128,) because the second dimension (the
            # number of words per sentence) differs between sentences.
            # gen_matrix maps each sentence to a 200x2417 tensor, so the
            # final xx has shape 128x200x2417.
            xx, yy = np.array(list(map(gen_matrix, data[i]))), labels[i]
            # print("xx yy shape:")
            # print(data[i].shape, xx.shape, yy.shape)
            yield (xx, yy)


#a = data_generator(x[:train_num], y[:train_num], batch_size)
#a.next()
model.fit_generator(data_generator(x[:train_num], y[:train_num], batch_size),
                    samples_per_epoch=train_num,
                    nb_epoch=30)
model.evaluate_generator(data_generator(x[train_num:], y[train_num:],
                                        batch_size),
                         val_samples=len(x[train_num:]))


def predict_one(s):  # prediction function for a single sentence
    s = gen_matrix(doc2num(s, maxlen))
    s = s.reshape((1, s.shape[0], s.shape[1]))
    return model.predict_classes(s, verbose=0)[0][0]
Example #23
# Training

nb_epoch = 30
nb_train_samples = 5020
nb_validation_samples = 1835

# gives the class indices ("added this")
print(validation_generator.class_indices)
print(validation_generator.classes)
'''
model.fit_generator(
        train_generator,
        samples_per_epoch=nb_train_samples,
        nb_epoch=nb_epoch,
        validation_data=validation_generator,
        nb_val_samples=nb_validation_samples)

# fixed error
model.save_weights('models/basic_cnn_30_epochs.h5')
'''

model.load_weights('models/basic_cnn_30_epochs.h5')
print "Loaded"

# evaluate_generator() returns output of the form [loss, accuracy]
# print(model.evaluate_generator(validation_generator, nb_validation_samples))
# number of test samples
print(model.evaluate_generator(test_generator, 4))
Example #24
# # Plot accuracy
acc_train = history.history["acc"]
val_acc = history.history["val_acc"]
epochs = range(0, len(acc_train))
plt.plot(epochs, acc_train, "b", label="Training Accuracy")
plt.plot(epochs, val_acc, "r", label="Validation Accuracy")
plt.title("Accuracy: Training and Validation")
plt.xlabel("Epochs")
plt.ylim(0, 1)
plt.legend()
plt.savefig("./cnn_baseline_figs/accuracy.png", dpi=300)
plt.show()

# Calculate test scores
test_loss, test_acc = model.evaluate_generator(test_ds, steps=50)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_acc)

# Confusion Matrix
Y_pred = model.predict_generator(test_ds)
y_pred = np.argmax(Y_pred, axis=1)
cm = confusion_matrix(test_ds.classes, y_pred)
print("Confusion Matrix - Test Data Set")
print(cm)
index = ["female", "male"]
columns = ["female", "male"]
cm_df = pd.DataFrame(cm, columns, index)
sns.heatmap(cm_df / np.sum(cm_df),
            annot=True,
            fmt=".2%",
Example #25
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                              factor=0.2,
                                              patience=4,
                                              verbose=1,
                                              min_lr=0.0001)
history = model.fit_generator(generator=tr_gen,
                              steps_per_epoch=count,
                              epochs=80,
                              validation_data=tr_gen1,
                              validation_steps=count1,
                              max_queue_size=2,
                              callbacks=[m_check])

#model.fit(np.array(data_train),np.array(label_train), batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(np.array(data_test),np.array(label_test)))
score = model.evaluate_generator(generator=tr_gen2,
                                 steps=count2,
                                 max_queue_size=2,
                                 verbose=0)
print('Score: ', score)
score = model.evaluate_generator(generator=tr_gen1,
                                 steps=count1,
                                 max_queue_size=2,
                                 verbose=0)
print('Score holdout: ', score)

print("With best model")
file_path = './sign_augment500.h5'
model1 = load_model(file_path, custom_objects={'fmeasure': fmeasure})
score1 = model1.evaluate_generator(generator=tr_gen2,
                                   steps=count2,
                                   max_queue_size=2,
                                   verbose=0)
Example #26
class model_vt(object):
    """

    Reimplementation of the voxnet by dimatura

    """
    def __init__(self, nb_classes, dataset_name):
        """

        Args:
            nb_classes: number of classes the model is going to learn, int
            dataset_name: name of the dataset {modelnet40, modelnet10}, just used to save weights every epoch

        initializes voxnet based on keras framework

        layers:
            3D Convolution
            Leaky ReLu
            Dropout
            3d Convolution
            Leaky ReLu
            MaxPool
            Dropout
            Dense
            Dropout
            Dense

        """

        # Stochastic Gradient Decent (SGD) with momentum
        # lr=0.01 for LiDar dataset
        # lr=0.001 for other datasets
        # decay of 0.00016667 approx the same as learning schedule (0:0.001,60000:0.0001,600000:0.00001)
        self._optimizer = SGD(lr=0.01, momentum=0.9, decay=0.00016667, nesterov=False)

        # use the LearningRateScheduler callback as an alternative to learning-rate decay
        #   self._lr_schedule = LearningRateScheduler(learningRateSchedule)

        # save weights after every epoch
        self._mdl_checkpoint = ModelCheckpoint("weights/" + dataset_name + "_{epoch:02d}_{acc:.2f}.hdf5",
                                               monitor="acc", verbose=0, save_best_only=False, mode="auto")

        # create directory if necessary
        if not os.path.exists("weights/"):
            os.makedirs("weights/")

        # init model
        self._mdl = Sequential()

        # convolution 1
        self._mdl.add(Convolution3D(input_shape=(1, 32, 32, 32),
                                    nb_filter=32,
                                    kernel_dim1=5,
                                    kernel_dim2=5,
                                    kernel_dim3=5,
                                    init='normal',
                                    border_mode='valid',
                                    subsample=(2, 2, 2),
                                    dim_ordering='th',
                                    W_regularizer=l2(0.001),
                                    b_regularizer=l2(0.001),
                                    ))

        logging.debug("Layer1:Conv3D shape={0}".format(self._mdl.output_shape))
        #Activation Leaky ReLu
        self._mdl.add(Activation(LeakyReLU(alpha=0.1)))

        # dropout 1
        self._mdl.add(Dropout(p=0.3))

        # convolution 2
        self._mdl.add(Convolution3D(nb_filter=32,
                                    kernel_dim1=3,
                                    kernel_dim2=3,
                                    kernel_dim3=3,
                                    init='normal',
                                    border_mode='valid',
                                    subsample=(1, 1, 1),
                                    dim_ordering='th',
                                    W_regularizer=l2(0.001),
                                    b_regularizer=l2(0.001),
                                    ))
        logging.debug("Layer3:Conv3D shape={0}".format(self._mdl.output_shape))

        #Activation Leaky ReLu
        self._mdl.add(Activation(LeakyReLU(alpha=0.1)))

        # max pool 1
        self._mdl.add(MaxPooling3D(pool_size=(2, 2, 2),
                                   strides=None,
                                   border_mode='valid',
                                   dim_ordering='th'))
        logging.debug("Layer4:MaxPool3D shape={0}".format(self._mdl.output_shape))

        # dropout 2
        self._mdl.add(Dropout(p=0.4))

        # dense 1 (fully connected layer)
        self._mdl.add(Flatten())
        logging.debug("Layer5:Flatten shape={0}".format(self._mdl.output_shape))

        self._mdl.add(Dense(output_dim=128,
                            init='normal',
                            activation='linear',
                            W_regularizer=l2(0.001),
                            b_regularizer=l2(0.001),
                            ))
        logging.debug("Layer6:Dense shape={0}".format(self._mdl.output_shape))

        # dropout 3
        self._mdl.add(Dropout(p=0.5))

        # dense 2 (fully connected layer)
        self._mdl.add(Dense(output_dim=nb_classes,
                            init='normal',
                            activation='linear',
                            W_regularizer=l2(0.001),
                            b_regularizer=l2(0.001),
                            ))
        logging.debug("Layer8:Dense shape={0}".format(self._mdl.output_shape))

        #Activation Softmax
        self._mdl.add(Activation("softmax"))

        # compile model
        self._mdl.compile(loss='categorical_crossentropy', optimizer=self._optimizer, metrics=["accuracy"])
        logging.info("Model compiled!")

    def fit(self, generator, samples_per_epoch,
            nb_epoch, valid_generator, nb_valid_samples, verbosity):
        """

        Args:
            generator: training sample generator from loader.train_generator
            samples_per_epoch: number of training samples per epoch from loader.return_train_samples
            nb_epoch: number of epochs to repeat training on the full set
            valid_generator: validation sample generator from loader.valid_generator, or None
            nb_valid_samples: number of validation samples per epoch from loader.return_valid_samples
            verbosity: 0 (no output), 1 (full output), 2 (output only after each epoch)

        """
        logging.info("Start training")
        self._mdl.fit_generator(generator=generator,
                                samples_per_epoch=samples_per_epoch,
                                nb_epoch=nb_epoch,
                                verbose=verbosity,
                                callbacks=[ #self._lr_schedule,
                                        self._mdl_checkpoint,],
                                validation_data=valid_generator,
                                nb_val_samples=nb_valid_samples,
                                )

        time_now = datetime.datetime.now()
        time_now = "_{0}_{1}_{2}_{3}_{4}_{5}".format(time_now.year, time_now.month, time_now.day,
                                                     time_now.hour, time_now.minute, time_now.second)
        logging.info("save model Voxnet weights as weights_{0}.h5".format(time_now))
        self._mdl.save_weights("weights_{0}.h5".format(time_now), False)

    def continue_fit(self, weights_file, generator, samples_per_epoch,
                     nb_epoch, valid_generator, nb_valid_samples, verbosity):
        """

        Args:
            weights_file: filename and path of the weights file (.hdf5)
            generator: training sample generator from loader.train_generator
            samples_per_epoch: number of training samples per epoch from loader.return_train_samples
            nb_epoch: number of epochs to repeat training on the full set
            valid_generator: validation sample generator from loader.valid_generator, or None
            nb_valid_samples: number of validation samples per epoch from loader.return_valid_samples
            verbosity: 0 (no output), 1 (full output), 2 (output only after each epoch)

        """
        self.load_weights(weights_file)
        self._mdl.fit_generator(generator=generator,
                            samples_per_epoch=samples_per_epoch,
                            nb_epoch=nb_epoch,
                            verbose=verbosity,
                            callbacks=[ #self._lr_schedule,
                                    self._mdl_checkpoint,],
                            validation_data=valid_generator,
                            nb_val_samples=nb_valid_samples,
                            )

    def evaluate(self, evaluation_generator, num_eval_samples):
        """

        Args:
            evaluation_generator: evaluation sample generator from loader.eval_generator
            num_eval_samples: number of evaluation samples from loader.return_eval_samples

        """
        self._score = self._mdl.evaluate_generator(
            generator=evaluation_generator,
            val_samples=num_eval_samples)
        print("Test score:", self._score)

    def load_weights(self, file):
        """

        Args:
            file: filename and path of the weights file (.hdf5)

        """
        logging.info("Loading model weights from file '{0}'".format(file))
        self._mdl.load_weights(file)

    def predict(self, X_predict):
        """

        Args:
            X_predict: Features to use to predict labels, numpy ndarray shape [~,1,32,32,32]

        returns:
            Probability for every label

        """
        return self._mdl.predict_proba(X_predict, verbose=0)
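A hypothetical driver for the class above; the `loader` object and its methods are taken from the docstrings, not from actual code:

# Sketch only: `loader` is assumed to expose the generators and sample
# counts the docstrings mention (train_generator, return_train_samples, ...).
net = model_vt(nb_classes=40, dataset_name='modelnet40')
net.fit(generator=loader.train_generator(),
        samples_per_epoch=loader.return_train_samples(),
        nb_epoch=8,
        valid_generator=loader.valid_generator(),
        nb_valid_samples=loader.return_valid_samples(),
        verbosity=1)
net.evaluate(evaluation_generator=loader.eval_generator(),
             num_eval_samples=loader.return_eval_samples())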
Example #27
print('FC3:', model.output_shape)
model.add(Dense(10, activation='relu'))
print('FC4:', model.output_shape)
model.add(Dense(1))
print('FC5:', model.output_shape)

log_shuffled = log.sample(frac=1)
size_total = log_shuffled.shape[0]
size_train = int(size_total * 0.7)
size_test = int(size_total * 0.1)
size_validation = size_total - size_train - size_test

log_train = log_shuffled.iloc[:size_train]
log_test = log_shuffled.iloc[size_train:size_train + size_test]
log_validation = log_shuffled.iloc[size_train + size_test:]

batch_size = 32
generator_train = data_generator(log_train, batch_size)
generator_test = data_generator(log_test, batch_size)
generator_validation = data_generator(log_validation, batch_size)

# adam = keras.optimizers.Adam(lr=0.001, decay=0.1)
model.compile(loss='mse', optimizer='adam')
loss_train_history = model.fit_generator(generator_train,
                                         samples_per_epoch=size_train,
                                         nb_epoch=5,
                                         validation_data=generator_validation,
                                         nb_val_samples=size_validation)
loss_evaluate = model.evaluate_generator(generator_test, val_samples=size_test)
print('Test loss:', loss_evaluate)
model.save('model.h5')
Example #28
# Load label names to use in prediction results
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

print('Model Accuracy = %.5f' % (evaluation[1]))

f = open('CIFAR10_SELU_results.txt', 'a')
f.write(' Test accuracy:' + str(evaluation[1]) +  '\n')  
f.close() 


predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                      batch_size=batch_size),
                                      steps=x_test.shape[0] // batch_size)

for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
Example #29
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(4, 4)))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(400))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(200))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adamax',
              metrics=['accuracy'])

model.summary()

# In[5]
tensorboard = TensorBoard(log_dir=r'.\logs\custom')

model.fit_generator(train_generator,
                    steps_per_epoch=512,
                    epochs=10,
                    callbacks=[tensorboard],
                    verbose=2)

# In[6]
print(model.evaluate_generator(test_generator, steps=1000))
Example #30
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=2,
              validation_split=0.1)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=0)
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test),
                                         1,
                                         max_queue_size=2,
                                         verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50),
                                        1,
                                        max_queue_size=2)
    pred_loss = K.eval(
        K.mean(
            losses.get(model.loss)(K.variable(y_test),
                                   K.variable(prediction))))

    assert (np.isclose(pred_loss, loss))
    assert (np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim, )))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert (loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #31
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory(
    '/dataset/cnn_dataset/training_set/',
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')
test_set = test_datagen.flow_from_directory('/dataset/cnn_dataset/test_set/',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')
history = model.fit(training_set,
                    steps_per_epoch=8000,
                    epochs=1,
                    validation_data=test_set,
                    validation_steps=2000)
model.save('mymodel.h5')
from keras.preprocessing import image
test_image = image.load_img(
    '/dataset/cnn_dataset/single_prediction/cat_or_dog_2.jpg',
    target_size=(64, 64))
test_image = image.img_to_array(test_image)
import numpy as np
test_image = np.expand_dims(test_image, axis=0)
result = model.predict(test_image)
accuracy = model.evaluate_generator(test_set)
with open('acc_file.txt', 'w') as f:
    f.write(str(accuracy[1]))
Example #32
# model.add(Dense(64, activation='relu'))
# model.add(BatchNormalization())
# model.add(Dropout(0.2))
# model.add(Dense(len(classes), activation='softmax'))

# model=multi_gpu_model(model, gpus=2)

# define the model
# model = tf.keras.Sequential([
#     tf.keras.Input(shape=(256,768)),
#     tf.compat.v1.keras.layers.CuDNNLSTM(64,  return_sequences=False),
#     tf.keras.layers.Flatten(),
#     tf.keras.layers.Dense(64, activation='relu'),
#     tf.keras.layers.BatchNormalization(),
#     tf.keras.layers.Dropout(0.2),
#     tf.keras.layers.Dense(len(classes))
# ])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
print(model.summary())
# fit the model
model.fit_generator(train_generator,
                    epochs=10,
                    verbose=1,
                    validation_data=valid_generator)
# evaluate the model
loss, accuracy = model.evaluate_generator(valid_generator, verbose=1)
print('Accuracy: %f' % (accuracy * 100))
Example #33
                                                    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    'test_data/',
    target_size=(20, 20),
    batch_size=32,
    class_mode='categorical')

history = model.fit_generator(train_generator,
                              steps_per_epoch=train_generator.samples / 32,
                              epochs=100,
                              validation_data=validation_generator,
                              validation_steps=validation_generator.samples /
                              32)

model.evaluate_generator(validation_generator, steps=32)
model.save('../Models/CNN/CNN3.h5')

import matplotlib.pyplot as plt

# Loss Curves
plt.figure(figsize=[8, 6])
plt.plot(history.history['loss'], 'r', linewidth=3.0)
plt.plot(history.history['val_loss'], 'b', linewidth=3.0)
plt.legend(['Training loss', 'Validation Loss'], fontsize=18)
plt.xlabel('Epochs ', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves', fontsize=16)

# Accuracy Curves
plt.figure(figsize=[8, 6])
Example #34
    steps_per_epoch=STEP_SIZE_TRAIN,
    validation_data=validation_generator,
    validation_steps=STEP_SIZE_VAL,
    epochs=30)

acc=history.history['acc']
val_acc=history.history['val_acc']
loss=history.history['loss']
val_loss=history.history['val_loss']

plot_loss_acc(acc,val_acc,loss,val_loss)

lucky_test_samples = np.random.randint(0, len(df), 5)
lucky_test_samples

cnnmodel.evaluate_generator(validation_generator, steps=STEP_SIZE_VAL)
for idx, row in df.iloc[lucky_test_samples].iterrows():    
    img = cv2.resize(cv2.imread("/content/drive/MyDrive/Dataset/indian_num-plate_dataset/" + row[0]) / 255.0, dsize=(WIDTH, HEIGHT))
    y_hat = cnnmodel.predict(img.reshape(1, WIDTH, HEIGHT, 3)).reshape(-1) * WIDTH
    
    # cv2.rectangle expects integer pixel coordinates
    xt, yt = int(y_hat[0]), int(y_hat[1])
    xb, yb = int(y_hat[2]), int(y_hat[3])

    img = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2RGB)
    image = cv2.rectangle(img, (xt, yt), (xb, yb), (0, 0, 255), 1)
    plt.imshow(image)
    plt.show()

#VGG16 MODEL

VGG16model = Sequential()
Example #35
    def train_network(path2train_dir, path2test_dir, path2val_dir, epochs,
                      batch_size, learn_rate, save_model_name):
        names = os.listdir(path2test_dir)
        num_train_samples = len(
            os.listdir(os.path.join(path2train_dir, names[0])))
        num_test_samples = len(
            os.listdir(os.path.join(path2test_dir, names[0])))
        num_val_samples = len(os.listdir(os.path.join(path2val_dir, names[0])))
        INPUT_SHAPE = (160, 160, 3)
        datagen = image.ImageDataGenerator(rescale=1. / 255)

        train_gen = datagen.flow_from_directory(path2train_dir,
                                                target_size=(INPUT_SHAPE[0],
                                                             INPUT_SHAPE[1]),
                                                color_mode='rgb',
                                                batch_size=batch_size,
                                                class_mode='categorical',
                                                shuffle=True,
                                                seed=42)

        test_gen = datagen.flow_from_directory(path2test_dir,
                                               target_size=(INPUT_SHAPE[0],
                                                            INPUT_SHAPE[1]),
                                               color_mode='rgb',
                                               batch_size=1,
                                               class_mode='categorical',
                                               shuffle=True,
                                               seed=42)

        val_gen = datagen.flow_from_directory(path2val_dir,
                                              target_size=(INPUT_SHAPE[0],
                                                           INPUT_SHAPE[1]),
                                              color_mode='rgb',
                                              batch_size=1,
                                              class_mode='categorical',
                                              shuffle=True,
                                              seed=42)

        sgd = SGD(lr=learn_rate, decay=1e-6, momentum=0.9, nesterov=True)

        # import facenet
        facenet = load_model('keras-facenet\\model\\facenet_keras.h5')
        facenet.load_weights(
            'keras-facenet\\weights\\facenet_keras_weights.h5')
        facenet.trainable = True
        trainable = False
        for layer in facenet.layers:
            if layer.name == 'Block17_5_Branch_1_Conv2d_0a_1x1':
                trainable = True
            layer.trainable = trainable

        model_net = Sequential()
        model_net.add(facenet)
        model_net.add(Dense(256))
        model_net.add(Activation('relu'))
        model_net.add(Dropout(0.5))
        model_net.add(Dense(3))
        model_net.add(Activation('sigmoid'))

        model_net.compile(loss='binary_crossentropy',
                          optimizer=sgd,
                          metrics=['accuracy'])
        model_net.fit_generator(train_gen,
                                steps_per_epoch=num_train_samples //
                                batch_size,
                                epochs=epochs,
                                validation_data=val_gen,
                                validation_steps=num_val_samples)
        model_net.save(save_model_name, include_optimizer=False)
        scores = model_net.evaluate_generator(test_gen, num_test_samples)

        print('Accuracy on the test data: %.2f%%' %
              (scores[1] * 100))
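A hypothetical invocation of train_network, assuming it is reachable as a plain function; the directory layout (one subfolder per class under each split) and every argument value are assumptions for illustration:

train_network('data/train', 'data/test', 'data/val',
              epochs=20, batch_size=32, learn_rate=0.001,
              save_model_name='facenet_finetuned.h5')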
Example #36
plt.subplot(1, 2, 2)
plt.plot(model_history.history['accuracy'], label='Train Accuracy')
plt.plot(model_history.history['val_accuracy'], label='Validation Accuracy')
plt.legend()
plt.xlabel('Number of epochs', fontsize=14)
plt.ylabel('Accuracy', fontsize=14)
plt.show()

# ## Model Evaluation

# In[57]:

#train_loss, train_accuracy = model.evaluate_generator(generator=train_data_gen, steps=40000//batch_size)
valid_loss, valid_accuracy = model.evaluate_generator(generator=valid_data_gen,
                                                      steps=10000 //
                                                      batch_size)
test_loss, test_accuracy = model.evaluate_generator(data_gen.flow(X_test,
                                                                  y_test,
                                                                  seed=123),
                                                    steps=len(X_test) //
                                                    batch_size)

print('Validation Accuracy: ', round((valid_accuracy * 100), 2), "%")
print('Test Accuracy: ', round((test_accuracy * 100), 2), "%")
print(" ")
print('Validation Loss: ', round(valid_loss, 2))
print('Test Loss: ', round(test_loss, 2))

# ## Confusion Matrix
Example #37
X_train = pad_sequences(X_train, maxlen=max_len)
X_valid = pad_sequences(X_valid, maxlen=max_len)
X_test = pad_sequences(X_test, maxlen=max_len)
print('X_train shape:', X_train.shape)
print('X_valid shape:', X_valid.shape)
print('X_test shape:', X_test.shape)


vocab_size = len(tokenizer.word_index)


model = Sequential()
model.add(LSTM(50, dropout_W=0.5, dropout_U=0.5, return_sequences=True, input_shape=(max_len, vocab_size)))
model.add(MaxPooling1D(pool_length=max_len))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(output_dim=nb_classes, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam')
              
train_generator = generate_seq_to_one_hot(X_train, Y_train, vocab_size, batch_size=gen_batch_size)
valid_generator = generate_seq_to_one_hot(X_valid, Y_valid, vocab_size, batch_size=gen_batch_size)
model.fit_generator(generator=train_generator, samples_per_epoch=len(X_train), nb_epoch=60,
                    show_accuracy=True, validation_data=valid_generator, nb_val_samples=len(X_valid))

test_generator = generate_seq_to_one_hot(X_test, Y_test, vocab_size, batch_size=gen_batch_size)
score = model.evaluate_generator(generator=test_generator, val_samples=len(X_test), 
                                 show_accuracy=True)
print('Test score:', score[0])
print('Test accuracy:', score[1])
Example #38
def test_multiprocessing_evaluating():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)

    @threadsafe_generator
    def custom_generator():
        batch_size = 10
        n_samples = 50

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker processes, consume on main process:
    #   - Each worker process runs its OWN copy of the generator
    #   - BUT on Windows, `multiprocessing` won't marshal generators across
    #     process boundaries
    #       -> make sure `evaluate_generator()` raises a ValueError and
    #          does not attempt to run the generator.
    if os.name == 'nt':
        with pytest.raises(ValueError):
            model.evaluate_generator(custom_generator(),
                                     steps=STEPS,
                                     max_queue_size=10,
                                     workers=WORKERS,
                                     use_multiprocessing=True)
    else:
        model.evaluate_generator(custom_generator(),
                                 steps=STEPS,
                                 max_queue_size=10,
                                 workers=WORKERS,
                                 use_multiprocessing=True)

    # - Produce data on 1 worker process, consume on main process:
    #   - Worker process runs generator
    #   - BUT on Windows, `multiprocessing` won't marshal generators across
    #     process boundaries -> make sure `evaluate_generator()` raises a
    #     ValueError and does not attempt to run the generator.
    if os.name == 'nt':
        with pytest.raises(ValueError):
            model.evaluate_generator(custom_generator(),
                                     steps=STEPS,
                                     max_queue_size=10,
                                     workers=1,
                                     use_multiprocessing=True)
    else:
        model.evaluate_generator(custom_generator(),
                                 steps=STEPS,
                                 max_queue_size=10,
                                 workers=1,
                                 use_multiprocessing=True)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    model.evaluate_generator(custom_generator(),
                             steps=STEPS,
                             max_queue_size=10,
                             workers=0,
                             use_multiprocessing=True)
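
# `threadsafe_generator` is referenced above but not defined in this
# snippet. A common minimal implementation (an assumption, not the
# original): serialize next() behind a lock so multiple worker threads
# can safely share one generator.
import threading

class ThreadsafeIter(object):
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)

def threadsafe_generator(f):
    def g(*args, **kwargs):
        return ThreadsafeIter(f(*args, **kwargs))
    return g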
Example #39
    def train_neural_network(self):
        train_generator, validation_generator, test_datagen = self.prepare_data()
        num_classes = 53
        input_shape = (50, 15, 3)
        epochs = 17

        model = Sequential()
        model.add(Conv2D(64, (3, 3), input_shape=input_shape, activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(64, (2, 2), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(Dropout(0.2))
        model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dropout(0.2))
        model.add(Dense(2048, activation='relu', kernel_constraint=maxnorm(3)))
        model.add(Dropout(0.2))
        model.add(Dense(1024, activation='relu', kernel_constraint=maxnorm(3)))
        model.add(Dropout(0.2))
        model.add(Dense(num_classes, activation='softmax'))

        model.compile(loss=keras.losses.sparse_categorical_crossentropy,
                      optimizer=keras.optimizers.Adam(),
                      metrics=['accuracy'])

        early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                   min_delta=0,
                                                   patience=1,
                                                   verbose=1, mode='auto')
        tb = TensorBoard(log_dir='c:/tensorboard/pb',
                         histogram_freq=1,
                         write_graph=True,
                         write_images=True,
                         embeddings_freq=1,
                         embeddings_layer_names=False,
                         embeddings_metadata=False)

        model.fit_generator(train_generator,
                            steps_per_epoch=num_classes,
                            epochs=epochs,
                            verbose=1,
                            validation_data=validation_generator,
                            validation_steps=100,
                            callbacks=[early_stop])
        score = model.evaluate_generator(test_datagen, steps=52)
        print('Test loss:', score[0])
        print('Test accuracy:', score[1])

        class_mapping = train_generator.class_indices

        # serialize model to JSON
        class_mapping = dict((v, k) for k, v in class_mapping.items())
        with open(dir_path + "/model_classes.json", "w") as json_file:
            json.dump(class_mapping, json_file)
        model_json = model.to_json()
        with open(dir_path + "/model.json", "w") as json_file:
            json_file.write(model_json)
        # serialize weights to HDF5
        model.save_weights(dir_path + "/model.h5")
        print("Saved model to disk")
Example #40
def test_multiprocessing_evaluate_error():
    arr_data = np.random.randint(0, 256, (50, 2))
    arr_labels = np.random.randint(0, 2, 50)
    batch_size = 10
    n_samples = 50
    good_batches = 3

    @threadsafe_generator
    def custom_generator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            y = arr_labels[start: end]
            yield X, y
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2,)))
    model.compile(loss='mse', optimizer='adadelta')

    # - Produce data on 4 worker processes, consume on main process:
    #   - Each worker process runs OWN copy of generator
    #   - BUT on Windows, `multiprocessing` won't marshal generators across
    #     process boundaries -> make sure `evaluate_generator()` raises a
    #     ValueError and does not attempt to run the generator.
    #   - On other platforms, make sure `RuntimeError` exception bubbles up
    if os.name == 'nt':
        with pytest.raises(ValueError):
            model.evaluate_generator(custom_generator(),
                                     steps=good_batches * WORKERS + 1,
                                     max_queue_size=10,
                                     workers=WORKERS,
                                     use_multiprocessing=True)
    else:
        with pytest.raises(RuntimeError):
            model.evaluate_generator(custom_generator(),
                                     steps=good_batches * WORKERS + 1,
                                     max_queue_size=10,
                                     workers=WORKERS,
                                     use_multiprocessing=True)

    # - Produce data on 1 worker process, consume on main process:
    #   - Worker process runs generator
    #   - BUT on Windows, `multiprocessing` won't marshal generators across
    #     process boundaries -> make sure `evaluate_generator()` raises a
    #     ValueError and does not attempt to run the generator.
    #   - On other platforms, make sure `RuntimeError` exception bubbles up
    if os.name == 'nt':
        with pytest.raises(ValueError):
            model.evaluate_generator(custom_generator(),
                                     steps=good_batches + 1,
                                     max_queue_size=10,
                                     workers=1,
                                     use_multiprocessing=True)
    else:
        with pytest.raises(RuntimeError):
            model.evaluate_generator(custom_generator(),
                                     steps=good_batches + 1,
                                     max_queue_size=10,
                                     workers=1,
                                     use_multiprocessing=True)

    # - Produce and consume data without a queue on main thread
    #   - Make sure the value of `use_multiprocessing` is ignored
    #   - Make sure `RuntimeError` exception bubbles up
    with pytest.raises(RuntimeError):
        model.evaluate_generator(custom_generator(),
                                 steps=good_batches + 1,
                                 max_queue_size=10,
                                 workers=0,
                                 use_multiprocessing=True)
Example #41

def model03():
    mainmodel_start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print('Start model03(): ' + mainmodel_start_time)

    optimizer = Adagrad()
    # optimizer = Adadelta()
    # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.001)

    go_board_rows, go_board_cols = 19, 19
    num_classes = go_board_rows * go_board_cols
    num_games = 100

    one_plane_encoder = OnePlaneEncoder((go_board_rows, go_board_cols))
    seven_plane_encoder = SevenPlaneEncoder((go_board_rows, go_board_cols))
    simple_encoder = SimpleEncoder((go_board_rows, go_board_cols))

    encoder = seven_plane_encoder

    processor = GoDataProcessor(encoder=encoder.name())

    train_generator = processor.load_go_data('train',
                                             num_games,
                                             use_generator=True)
    test_generator = processor.load_go_data('test',
                                            num_games,
                                            use_generator=True)

    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)

    network_large = large
    network_small = small

    network = network_small
    network_layers = network.layers(input_shape)
    model = Sequential()
    for layer in network_layers:
        model.add(layer)

    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    epochs = 5
    batch_size = 128
    model.fit_generator(
        generator=train_generator.generate(batch_size, num_classes),
        epochs=epochs,
        steps_per_epoch=train_generator.get_num_samples() // batch_size,
        validation_data=test_generator.generate(batch_size, num_classes),
        validation_steps=test_generator.get_num_samples() // batch_size,
        callbacks=[
            ModelCheckpoint(
                filepath='D:\\CODE\\Python\\Go\\code\\dlgo\\data\\checkpoints\\'
                         'small_epoch_{epoch:02d}-acc-{accuracy:.4f}'
                         '-val_acc_{val_accuracy:.4f}.h5',
                monitor='accuracy')
        ])
    model.evaluate_generator(
        generator=test_generator.generate(batch_size, num_classes),
        steps=test_generator.get_num_samples() // batch_size)
Example #42
label_list_path = 'datasets/cifar-10-batches-py/batches.meta'


keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
datadir_base = os.path.expanduser(keras_dir)
if not os.access(datadir_base, os.W_OK):
    datadir_base = os.path.join('/tmp', '.keras')
label_list_path = os.path.join(datadir_base, label_list_path)

with open(label_list_path, mode='rb') as f:
    labels = pickle.load(f)

# Evaluate model with test data set and share sample prediction results
evaluation = model.evaluate_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)
print('Model Accuracy = %.2f' % (evaluation[1]))

predict_gen = model.predict_generator(datagen.flow(x_test, y_test,
                                                   batch_size=batch_size,
                                                   shuffle=False),
                                      steps=x_test.shape[0] // batch_size,
                                      workers=4)

for predict_index, predicted_y in enumerate(predict_gen):
    actual_label = labels['label_names'][np.argmax(y_test[predict_index])]
    predicted_label = labels['label_names'][np.argmax(predicted_y)]
    print('Actual Label = %s vs. Predicted Label = %s' % (actual_label,
                                                          predicted_label))
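
# Since shuffle=False above, the rows of predict_gen line up with y_test,
# so accuracy can be recomputed directly (a sketch; predict_generator may
# return a few rows fewer than len(y_test) when the test set size is not
# a multiple of batch_size):
predicted = np.argmax(predict_gen, axis=-1)
actual = np.argmax(y_test[:len(predicted)], axis=-1)
print('Recomputed accuracy = %.4f' % np.mean(predicted == actual))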
Example #43
                validation_steps = max(1, len(partition["validation"]) // batch_size)
                tempHist = model.fit_generator(generator=training_generator,
                                    validation_data=validation_generator,
                                    validation_steps=validation_steps,
                                    epochs=epochs,
                                    use_multiprocessing=False,
                                    workers=1,
                                    max_queue_size=20,
                                    verbose=1
                                    #,callbacks=[history]
                                    )

                score = model.evaluate_generator(generator=test_generator,
                                                 steps=validation_steps,
                                                 use_multiprocessing=False,
                                                 workers=1,
                                                 max_queue_size=20,
                                                 verbose=0)

                print(score)

                temp = {"loss": score[0], "validateAccuracy": score[1]}
                tempScores.append(temp.copy())

                del tempHist
                del score
                del model

                #sess.close()
Example #44
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    gen_loss = model.evaluate_generator(data_generator(True), 256, verbose=0)
    assert(gen_loss < 0.8)

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss < 0.8)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
Example #45
# SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))

#################################################################
#3. Set up the model's training process
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

##################################################################
#4. Train the model
# train = 60, validation = 20, batch=5
model.fit_generator(train_generator,
                    steps_per_epoch=12 * 10,
                    epochs=5,
                    validation_data=test_generator,
                    validation_steps=4)

######################################################################
#5. Evaluate the model
print("--Evaluate--")
scores = model.evaluate_generator(test_generator, steps=4)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

###################################################################
#6. Use the model
print("--Predict--")
output = model.predict_generator(test_generator, steps=4)
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
print(test_generator.class_indices)
print(output)
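
###################################################################
# The generators above are not defined in this snippet. Given the comment
# (train = 60, validation = 20, batch = 5) and the use of class_indices,
# they were plausibly built with flow_from_directory; a hedged sketch
# (directory names and image size are assumptions):
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    'data/train',              # assumed layout: one sub-folder per class
    target_size=(24, 24),      # assumed input size
    batch_size=5,
    class_mode='categorical')

test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(
    'data/test',
    target_size=(24, 24),
    batch_size=5,
    class_mode='categorical')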
Example #46
model.add(Dense(128, activation='tanh'))

model.add(Dense(3, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

print(model.summary())

stop = EarlyStopping(monitor='val_acc', patience=5)
log = TensorBoard()
model.fit_generator(
    generator=motionDataset.randgeneratorReduced(data, idx_train,
                                                 sequence_len),
    validation_data=motionDataset.randgeneratorReduced(data, idx_val,
                                                       sequence_len),
    steps_per_epoch=20000,
    validation_steps=6000,
    epochs=100,
    callbacks=[stop, log])

scores = model.evaluate_generator(
    generator=motionDataset.randgeneratorReduced(data, idx_test, sequence_len),
    steps=25000)
print("Accuracy: %.2f%%" % (scores[1] * 100))
motionDataset.confusionMatrix(
    model, motionDataset.randgeneratorReduced(data, idx_test, sequence_len),
    25000, class_names)
#model.save('CNN_model_phone_acc.h5')
motionDataset.exportmodel('CNN_Phone_acc', 'conv1d_1_input', 'dense_2/Softmax')
Example #47
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()

## Accuracy
plt.figure(2)
for l in acc_list:
    plt.plot(epochs,
             history.history[l],
             'b',
             label='Training accuracy (' +
             str(format(history.history[l][-1], '.5f')) + ')')
for l in val_acc_list:
    plt.plot(epochs,
             history.history[l],
             'g',
             label='Validation accuracy (' +
             str(format(history.history[l][-1], '.5f')) + ')')

plt.title('Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

# test model
score = model.evaluate_generator(generator=test_generator, steps=20)

print('Test accuracy:', score[1])
Example #48
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2,
              validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test), 1,
                                         max_queue_size=2, verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1,
                                        max_queue_size=2)
    pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test),
                                                     K.variable(prediction))))

    assert(np.isclose(pred_loss, loss))
    assert(np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.weights  # Model should be built.

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
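
# `make_batches` (a Keras test utility imported elsewhere in this file)
# returns (start, end) index tuples covering `size` items; a behaviorally
# equivalent sketch:
def make_batches_sketch(size, batch_size):
    num_batches = (size + batch_size - 1) // batch_size  # ceil division
    return [(i * batch_size, min(size, (i + 1) * batch_size))
            for i in range(num_batches)]

# e.g. make_batches_sketch(120, 50) -> [(0, 50), (50, 100), (100, 120)]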
Example #49
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test)

    prediction = model.predict_generator(data_generator(X_test, y_test), X_test.shape[0], max_q_size=2)
    gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50), X_test.shape[0], max_q_size=2)
    pred_loss = K.eval(K.mean(objectives.get(model.loss)(K.variable(y_test), K.variable(prediction))))

    assert np.isclose(pred_loss, loss)
    assert np.isclose(gen_loss, loss)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)