Example #1
from keras.optimizers import Adam, SGD

adam = Adam(lr=0.040)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)  # unused alternative optimizer
model.compile(loss="categorical_crossentropy",
              optimizer=adam,  # pass the configured instance; the string 'adam' would use default settings
              metrics=['accuracy'])
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

print('Fitting model')
# model.fit(images, classes, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_split=0.2, initial_epoch=0)
model.fit_generator(training_data,
                    samples_per_epoch=samples_per_epoch,
                    validation_data=validation_data,
                    nb_val_samples=nb_val_samples,
                    nb_epoch=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)
print('Evaluating model')
# score = model.evaluate(images, classes, verbose=1)

# validation_data = ( (load_image(x), to_categorical([y], nb_classes=nb_classes)) for x, y in test )
# validation_data = gen(X_test, Y_test)
score = model.evaluate_generator(validation_data, val_samples=nb_val_samples)
# score = model.evaluate(X_test, Y_test, verbose=1)
print('result: %s' % score)
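The calls above use Keras 1.x argument names (samples_per_epoch, nb_val_samples, nb_epoch, val_samples). A rough Keras 2 equivalent is sketched below, assuming training_data and validation_data are generators yielding batches of batch_size samples; this is an illustrative rewrite, not part of the original example.

# Keras 2.x sketch: steps are counted in batches, not samples
steps_per_epoch = samples_per_epoch // batch_size
validation_steps = nb_val_samples // batch_size

model.fit_generator(training_data,
                    steps_per_epoch=steps_per_epoch,
                    validation_data=validation_data,
                    validation_steps=validation_steps,
                    epochs=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch)

score = model.evaluate_generator(validation_data, steps=validation_steps)
print('result: %s' % score)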
Example #2
# Balance class weights by frequency and give preference for RED lights.
# (assumes `from sklearn.utils import class_weight`; the result re-binds that name to an array)
class_weight = class_weight.compute_class_weight(
    'balanced', np.unique(train_generator.classes), train_generator.classes)
class_weight[0] *= 2

if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)
else:
    # if it is a new model, set a balanced bias for the last layer
    # so that initial predictions reflect the (weighted) class distribution
    w, _ = model.get_layer('conv10').get_weights()
    model.get_layer('conv10').set_weights(
        [w, np.log(class_weight / sum(class_weight))])

print('Fitting model')
# note: samples_per_epoch/nb_epoch are Keras 1.x argument names; validation_steps is the Keras 2 name
model.fit_generator(train_generator,
                    samples_per_epoch=samples_per_epoch,
                    validation_data=val_generator,
                    validation_steps=nb_val_samples // 64,
                    nb_epoch=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch,
                    class_weight=class_weight)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)
print('Evaluating model')
score = model.evaluate_generator(val_generator,
                                 steps=int(samples_per_epoch / nb_val_samples))
print('result: %s' % score)
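Example #2 combines two tricks: balanced per-class weights from sklearn, and initializing the bias of the final layer ('conv10' in the example) to the log of the normalized weights so early predictions are not dominated by the majority class. A minimal, self-contained sketch of both follows; the alias cw avoids re-binding the sklearn module name, and train_generator and model are assumed to exist as above.

import numpy as np
from sklearn.utils import class_weight as cw

# weights inversely proportional to class frequency; class 0 (RED lights) counted double
weights = cw.compute_class_weight('balanced',
                                  classes=np.unique(train_generator.classes),
                                  y=train_generator.classes)
weights[0] *= 2

# "balanced bias" initialization for the last layer, mirroring the example above
w, _ = model.get_layer('conv10').get_weights()
model.get_layer('conv10').set_weights([w, np.log(weights / weights.sum())])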
Example #3
plt.plot(model.history.history['top_5_accuracy'], label='train')
plt.plot(model.history.history['val_top_5_accuracy'], label='val')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('top5_accuracy')

# log this plot to the aml workspace so we can see it in the azure portal
if remote_execution:
    run.log_image('top5_accuracy', plot=plt)
else:
    plt.savefig('top5_accuracy.png')
plt.close()
# # Results

# In[22]:

# note: the second positional argument (val_logits) is not part of the stock Keras
# flow_from_directory API; this call assumes a customized ImageDataGenerator that
# also yields precomputed logits (e.g. for knowledge distillation)
val_generator_no_shuffle = data_generator.flow_from_directory(
    os.path.join(data_dir, 'val_no_resizing'),
    val_logits,
    target_size=(299, 299),
    batch_size=64,
    shuffle=False)

# In[23]:

# val_loss, val_acc, val_top_k_categorical_accuracy
if remote_execution:
    run.log_list('final eval',
                 model.evaluate_generator(val_generator_no_shuffle, 80))
else:
    print(model.evaluate_generator(val_generator_no_shuffle, 80))
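evaluate_generator returns the metric values in the order of model.metrics_names, so pairing the two makes the 'final eval' log easier to read. A small sketch, assuming the same val_generator_no_shuffle, 80 evaluation steps, and the azureml Run object used above:

scores = model.evaluate_generator(val_generator_no_shuffle, 80)
named_scores = dict(zip(model.metrics_names, scores))

if remote_execution:
    for name, value in named_scores.items():
        run.log(name, value)  # one named metric per entry in the AML run
else:
    print(named_scores)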
Example #4
# log this plot to the aml workspace so we can see it in the azure portal
if remote_execution:
    run.log_image('Accuracy', plot=plt)
else:
    plt.savefig('accuracy.png')
plt.close()
# In[ ]:

plt.plot(model.history.history['top_k_categorical_accuracy'], label='train')
plt.plot(model.history.history['val_top_k_categorical_accuracy'], label='val')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('top5_accuracy')

# log this plot to the aml workspace so we can see it in the azure portal
if remote_execution:
    run.log_image('Top k acc', plot=plt)
else:
    plt.savefig('top_k_acc.png')
plt.close()

# # Results

# In[ ]:

# val_loss, val_acc, val_top_k_categorical_accuracy
if remote_execution:
    run.log_list('final eval', model.evaluate_generator(val_generator, 80))
else:
    print(model.evaluate_generator(val_generator, 80))
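The "log to the AML run or save to disk" branch is repeated before every plot in Examples #3 and #4. A small helper, sketched under the assumption that remote_execution and run are defined as above, keeps that logic in one place; it uses only the run.log_image(name, plot=plt) and plt.savefig calls already present in the examples.

import matplotlib.pyplot as plt

def log_plot(name, filename):
    """Log the current matplotlib figure to the AML run, or save it locally."""
    if remote_execution:
        run.log_image(name, plot=plt)  # appears in the Azure portal
    else:
        plt.savefig(filename)
    plt.close()

# usage, mirroring the cells above:
# plt.plot(model.history.history['top_k_categorical_accuracy'], label='train')
# plt.plot(model.history.history['val_top_k_categorical_accuracy'], label='val')
# plt.legend(); plt.xlabel('epoch'); plt.ylabel('top5_accuracy')
# log_plot('Top k acc', 'top_k_acc.png')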