# ----- Example 1 -----
                np.expand_dims(cv.resize(image, (224, 224)), 0))
            labels[count % batch_size] = np.expand_dims(
                to_categorical(label, num_classes=num_classes), 0)

            count += 1
            if (count % batch_size == 0):
                yield images, labels


# Infer on ImageNet: pre-allocate result arrays for the validation set.
# NOTE(review): Nvalidation (validation-set size) and C (number of classes)
# are defined elsewhere in the file — confirm their values upstream.
labels = np.zeros((Nvalidation))
pred_labels = np.zeros((Nvalidation, C))
pred_labels_new = np.zeros((Nvalidation, C))

# Evaluate the (unmodified) model over the validation generator.
# NOTE(review): the generator is created with batch_size=32 while the number
# of steps uses Nbatch — if Nbatch != 32 the evaluation covers the wrong
# number of samples; confirm Nbatch == 32.
score = model.evaluate_generator(imagenet_generator(validation_dataset,
                                                    batch_size=32),
                                 steps=Nvalidation // Nbatch,
                                 verbose=1)
print("Evaluation Result of Original Model on ImageNet2012: " + str(score))

# Train on ImageNet
# Checkpoint file pattern: one weights file per epoch, e.g. cp-0001.ckpt.
checkpoint_path = "Mobilenet/cp-{epoch:04d}.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
os.makedirs(checkpoint_dir, exist_ok=True)

# Save weights only (not the full model) at the end of every epoch.
# NOTE(review): `period` is the legacy Keras argument (replaced by
# `save_freq` in newer tf.keras) — consistent with the fit_generator /
# evaluate_generator API used elsewhere in this file.
cp_callback = keras.callbacks.ModelCheckpoint(
    checkpoint_path,
    verbose=1,
    save_weights_only=True,
    # Save weights, every 1-epoch
    period=1)
# ----- Example 2 -----
    exit(1)

# Print the layer-by-layer architecture summary to stdout.
model.summary()

print("TRAINING PHASE")

# Linear learning-rate decay so the rate reaches ~0 by the final epoch.
# NOTE(review): lrate and epochs are defined elsewhere in the file.
decay = lrate / epochs

# Plain SGD with momentum; Nesterov acceleration disabled.
sgd = SGD(lr=lrate, momentum=0.9, decay=decay, nesterov=False)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

# TensorBoard logging (graph only — no histograms).
tensorboard = TensorBoard(log_dir=logdir, histogram_freq=0, write_graph=True)
tensorboard.set_model(model)

# Train from the generator; validation data evaluated each epoch.
history = model.fit_generator(train_generator,
                              epochs=epochs,
                              validation_data=validation_generator,
                              callbacks=[tensorboard])

# Persist the full model (architecture + weights + optimizer state).
model.save("leaf.%s.h5" % runmodel)

print("TESTING PHASE")

# NOTE(review): steps=1 evaluates only a single batch from test_generator —
# confirm this is intentional and not meant to cover the whole test set.
evaltest = model.evaluate_generator(test_generator, 1)
for name, val in zip(model.metrics_names, evaltest):
    print(name, val)

print("END", datetime.datetime.now().isoformat())
# mse = losses.mean_squared_error
# Adam optimizer with the Keras default learning rate made explicit.
adam = Adam(lr=0.001)

model.compile(loss='categorical_crossentropy',
              optimizer=adam,
              metrics=['accuracy'])

# Fit the model
# f = open("output/log.csv","w+")
# csv_logger = CSVLogger('/output/log.csv', append=True, separator=',')
# TensorBoard logging only; the CSV logger above is disabled.
tb_callback = TensorBoard(log_dir='/output/logs',
                          histogram_freq=0,
                          batch_size=batch_size)
# model.fit(train_features, train_labels, epochs=128, batch_size=batch_size,  verbose=2, callbacks=[csv_logger])
# NOTE(review): 1400 appears to be the hard-coded training-sample count, and
# (1400 / batch_size) + 1 is a float under Python 3 — confirm the installed
# Keras version accepts a non-integer steps_per_epoch (integer division //
# with a ceiling would be the conventional form).
model.fit_generator(train_generator,
                    epochs=150,
                    steps_per_epoch=(1400 / batch_size) + 1,
                    verbose=2,
                    callbacks=[tb_callback])
# Persist the full trained model.
model.save("/output/mobnet.h5")
# NOTE(review): 550 looks like the hard-coded test-sample count; same float
# `steps` concern as above applies.
score, acc = model.evaluate_generator(test_generator,
                                      steps=(550 / batch_size) + 1)
# calculate predictions
# pred = model.predict(test_features)
# print(test_labels.shape,pred.shape)
# print(test_labels[0],pred[0])
# Class names for the (disabled) classification report below.
target_names = ['blade', 'gun', 'others', 'shuriken']
print("Test score: " + str(score))
print("Test accuracy: " + str(acc))

# print(classification_report(test_labels, pred,target_names=target_names))
# ----- Example 4 -----
# Assemble the callback list from feature flags; the callback objects
# (earlyStop, reduce_lr, modelCheckpoint) are constructed elsewhere.
if earlyStopFlag:
    callbacksList.append(earlyStop)
if reduceLRFlag:
    callbacksList.append(reduce_lr)
if modelCheckpointFlag:
    callbacksList.append(modelCheckpoint)

# Train with per-class weights to compensate for class imbalance.
# NOTE(review): steps_per_epoch is scaled by foldAugment — presumably the
# data-augmentation multiplication factor; confirm against its definition.
history = model.fit_generator(
    trainGenerator,
    steps_per_epoch=trainSamplesNumber // batchSize * foldAugment,
    epochs=epochs,
    verbose=1,
    callbacks=callbacksList,
    validation_data=validationGenerator,
    class_weight=getClassWeights(trainGenerator.classes),
    shuffle=True,
    validation_steps=validateSamplesNumber // batchSize)

# NOTE(review): the second positional argument of evaluate_generator is
# `steps` (number of batches), but a sample count is passed here — this
# draws testSamplesNumber batches, not testSamplesNumber samples; confirm
# whether testSamplesNumber // batchSize was intended.
score = model.evaluate_generator(testGenerator, testSamplesNumber)

print('Test loss:', score[0])
print('Test accuracy:', score[1])

# serialize model to JSON
model_json = model.to_json()
with open(modelFile, "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(weightsFile)
print("Saved model to disk")