def main():
	# construct the argument parser and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-d", "--dataset", required=True,
		help="path to input dataset of images")
	ap.add_argument("-m", "--model", required=True,
		help="path to output trained model")
	ap.add_argument("-l", "--label-bin", required=True,
		help="path to output label binarizer")
	ap.add_argument("-p", "--plot", required=True,
		help="path to output accuracy/loss plot")
	args = vars(ap.parse_args())

	# initialize the data and labels
	print("[INFO] loading images...")

	# load the training images and labels; train_test_split below shuffles
	# them and holds out 25% for validation
	image_path = args["dataset"] + "/train/"
	data, labels = split(image_path)
	(trainX, valX, trainY, valY) = train_test_split(data, labels, test_size=0.25)

	# convert the labels from integers to vectors (for 2-class, binary
	# classification you should use Keras' to_categorical function
	# instead, as scikit-learn's LabelBinarizer will not return a
	# one-hot vector in that case; see the shape note below)
	lb = LabelBinarizer()
	trainY = lb.fit_transform(trainY)
	valY = lb.transform(valY)
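	# shape check for the caveat above (illustrative values, not from this
	# dataset): LabelBinarizer().fit_transform(["cat", "dog", "dog"]) yields
	# shape (3, 1), whereas to_categorical([0, 1, 1]) yields the (3, 2)
	# one-hot matrix that categorical_crossentropy expects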

	# construct the image generator for data augmentation
	aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
		height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
		horizontal_flip=True, fill_mode="nearest")
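	# aug.flow() below yields randomly perturbed copies of each batch on the
	# fly during training; the arrays themselves are never modified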

	# alternative backbones tried during experimentation (uncomment to swap in):
	# base_model = InceptionV3(input_shape=(HEIGHT, WIDTH, 3),
	#                          weights='imagenet', include_top=False, pooling='avg')
	# base_model = InceptionResNetV2(input_shape=(HEIGHT, WIDTH, 3),
	#                                weights='imagenet', include_top=False, pooling='avg')
	base_model = Xception(input_shape=(HEIGHT, WIDTH, 3),
		weights='imagenet', include_top=False, pooling='avg')
	x = base_model.output
	x = Dense(1024, activation="relu")(x)
	x = Dropout(DROPOUT)(x)
	predictions = Dense(6, activation="softmax")(x)

	model = Model(inputs=base_model.input, outputs=predictions)

	# initialize the model and optimizer
	print("[INFO] training network...")
	opt = SGD(lr=INIT_LR, momentum=0.9, clipnorm=5.0)
	model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

	# saver = ModelCheckpoint("output/model.hdf5", verbose=1,
	#                         save_best_only=True, monitor="val_acc", mode="max")
	# reduce_lr = ReduceLROnPlateau(monitor="loss", factor=0.5,
	#                               patience=5, verbose=1, min_lr=0.0001)
	stopper = EarlyStopping(patience=5, verbose=1, monitor="val_acc", mode="max")
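	# cycle the LR between INIT_LR and 0.005; step_size is eight epochs' worth
	# of batches per half-cycle, and 'triangular2' halves the amplitude after
	# each full cycle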
	clr = CyclicLR(base_lr=INIT_LR, max_lr=0.005, step_size=8*len(trainX)//BS, mode='triangular2')
	# train the network
	H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
		validation_data=(valX, valY), steps_per_epoch=len(trainX) // BS,
		validation_steps=len(valX) // BS,
		epochs=EPOCHS, callbacks=[stopper, clr])

	##########################
	## EVALUATE THE NETWORK ##
	##########################
	print("[INFO] evaluating network...")
	image_path = args["dataset"] + "/test/"
	testX, testY = split(image_path)
	testY = lb.transform(testY)

	predictions = model.predict(testX, batch_size=BS)
	print(classification_report(testY.argmax(axis=1),
		predictions.argmax(axis=1), target_names=lb.classes_))
	print("Accuracy: {}".format(accuracy_score(testY.argmax(axis=1), predictions.argmax(axis=1))))
	print(confusion_matrix(testY.argmax(axis=1), predictions.argmax(axis=1)))

	# save the model and label binarizer to disk
	print("[INFO] serializing network and label binarizer...")
	model.save(args["model"])
	with open(args["label_bin"], "wb") as f:
		f.write(pickle.dumps(lb))

	print("[INFO] plotting and saving results...")
	# plot the training loss and accuracy
	# if training stopped early the history is shorter than EPOCHS, so size
	# the x-axis from the recorded history rather than stopper.stopped_epoch
	N = np.arange(0, len(H.history["loss"]))
	plt.style.use("ggplot")
	plt.figure()
	plt.plot(N, H.history["loss"], label="train_loss")
	plt.plot(N, H.history["val_loss"], label="val_loss")
	plt.plot(N, H.history["acc"], label="train_acc")
	plt.plot(N, H.history["val_acc"], label="val_acc")
	plt.title("Training Loss and Accuracy (Xception)")
	plt.xlabel("Epoch #")
	plt.ylabel("Loss/Accuracy")
	plt.legend()
	plt.savefig(args["plot"])

	plot_confusion_matrix(lb.classes_, testY.argmax(axis=1), predictions.argmax(axis=1))

	plt.style.use("ggplot")
	plt.figure()
	plt.plot(clr.history['lr'], clr.history['acc'])
	plt.title("Training Learning Rate and Accuracy (Xception)")
	plt.xlabel("Learning Rate")
	plt.ylabel("Accuracy")
	plt.savefig("output/learning_rate_Xception.png")
	print("Dropout: {} BS: {}".format(DROPOUT, BS))
print("[INFO] training network...")
#[CODE HERE] - fit model (1 line)
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              batch_size=32,
              epochs=num_epocas,
              verbose=1)
#[END CODE]

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(
    classification_report(testY.argmax(axis=1),
                          predictions.argmax(axis=1),
                          target_names=["cat", "dog", "panda"]))

# plot the training loss and accuracy
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, num_epocas), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, num_epocas), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, num_epocas), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, num_epocas), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.show()
Example #3
# train the network
print("[INFO] training network...")

H = model.fit_generator(aug.flow(trainX_res, trainY_res, batch_size=BATCH_SIZE),
                        validation_data=(testX, testY),
                        epochs=EPOCHS,
                        callbacks=[
                            ModelCheckpoint('XXX.h5',
                                            monitor='val_acc',
                                            save_best_only=True)])

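# restore the best weights saved by the ModelCheckpoint callback before evaluating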
model.load_weights('XXX.h5')

predictions = model.predict(testX, batch_size=BATCH_SIZE)

print(confusion_matrix(testY.argmax(axis=1), predictions.argmax(axis=1)))
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=lb.classes_))

acc = H.history['acc']
val_acc = H.history['val_acc']
loss = H.history['loss']
val_loss = H.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'b', label='Training accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
plt.title('<Model>: Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
Example #4
File: vae.py  Project: chamomi/celtic
                  loss="categorical_crossentropy",
                  metrics=['acc'])
    model.fit(train_data,
              train_target,
              batch_size=128,
              epochs=15,
              verbose=1,
              validation_split=0.2)

    print('FF')
    scores = model.evaluate(test_data, test_target, verbose=2)
    print(scores)

    y_pred = model.predict(test_data)
    # collapse the softmax outputs back into one-hot vectors
    y_norm = to_categorical(y_pred.argmax(axis=1), num_classes=4)
    print(classification_report(test_target, y_norm))
    print('MCC: ', matthews_corrcoef(y_test, np.argmax(y_norm, axis=1)))
    print("classes: 0-cy, 1-en, 2-ga, 3-gd")

    print_cm(y_test, np.argmax(y_norm, axis=1))

    # SVM
    print('SVM')
    svm = SVC(kernel='linear', C=1)
    text_clf_svm = svm.fit(train_data, y_train)

    y_pred = text_clf_svm.predict(test_data)
    SVM_acc = np.mean(y_pred == y_test)
    print("SVM accuracy ", SVM_acc)
model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()
a, b = 0, 2
model.fit(train_X, train_Y[:, a:b], batch_size=batch_size, epochs=100, validation_data=(test_X, test_Y[:, a:b]))
scores = model.evaluate(test_X, test_Y[:, a:b], verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
plot_model(model, to_file='model.png', show_shapes=True)


#saving the model
model.save('my_model_cSIN_tag')
doc_model.save('doc_model_cSIN')

pred = model.predict(test_X)
pred = pred.argmax(axis=1)

# printing the metrics
c_matrix = confusion_matrix(test_Y[:, a:b].argmax(axis=1), pred)
print(c_matrix)
accuracy = accuracy_score(test_Y[:, a:b].argmax(axis=1), pred)
print('Accuracy : ', accuracy)
# precision = true positives / all predicted positives (TP + FP)
# recall = true positives / all actual positives (TP + FN)
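# e.g. (illustrative numbers): with TP = 8, FP = 2, FN = 4,
# precision = 8 / (8 + 2) = 0.80 and recall = 8 / (8 + 4) ≈ 0.67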
print(classification_report(test_Y[:, a:b].argmax(axis=1), pred))

pickle_out = open("tokenizer.pickle","wb")
pickle.dump(tokenizer, pickle_out)
pickle_out.close()

Example #6
import numpy as np
from keras.preprocessing import image
from keras.models import load_model
# preprocess_input (used below) must match the backbone the model was trained
# with; the 299x299 input suggests an Inception/Xception-family network, so
# the InceptionV3 variant is assumed here
from keras.applications.inception_v3 import preprocess_input
def predict(model, img):
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print(preds)
    return preds[0]
img = image.load_img('french_bulldog.jpg', target_size=(299, 299))
preds = predict(load_model(MODEL_FILE), img)

print(len(preds))
print(preds.argmax())

# invert class_indices so a predicted index maps back to its class name
labels = train_generator.class_indices
labels = dict((v, k) for k, v in labels.items())

# labels

print(labels[preds.argmax()])

# !pip install keras-vis

from vis.utils import utils
from matplotlib import pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
img1 = utils.load_img('Images/basenji/n02110806_1396.jpg', target_size=(224, 224))
Example #7
# NOTE: the def line was lost in extraction; reconstructed here to match the
# classic scikit-learn confusion-matrix plotting example that this body follows
def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
predfinal = predictions.argmax(axis=1)
cm = confusion_matrix(test_labels, predfinal)

cm_plot_labels = ['akiec', 'bcc', 'bkl', 'df', 'mel', 'nv', 'vasc']

plot_confusion_matrix(cm, cm_plot_labels)
  model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
  #model.summary()

  a, b = 0, 4
  model.fit(X[train], Y[train][:, a:b], batch_size=batch_size, epochs=10, validation_data=(X[test], Y[test][:, a:b]))
  scores = model.evaluate(X[test], Y[test][:, a:b], verbose=0)
  print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

  pred = model.predict(X[test])
  final_probability_0 = pred[:,0]
  final_probability_1 = pred[:,1]
  final_probability_2 = pred[:,2]
  final_probability_3 = pred[:,3]

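  # accumulate this fold's predicted labels and per-class probabilities into
  # the global lists so metrics can be aggregated across folds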
  for i in range(len(X[test])):
    final_predicted_labels.append(pred.argmax(axis=1)[i])
    final_probability_class0.append(final_probability_0[i])
    final_probability_class1.append(final_probability_1[i])
    final_probability_class2.append(final_probability_2[i])
    final_probability_class3.append(final_probability_3[i])

  c_matrix = confusion_matrix(Y[test][:, a:b].argmax(axis=1), pred.argmax(axis=1))
  print(c_matrix)
  print(classification_report(Y[test][:, a:b].argmax(axis=1), pred.argmax(axis=1)))

  plot_model(model, to_file='model.png', show_shapes=True)
  model.save('my_model_cSIN2_tag')

  accuracy.append(accuracy_score(Y[test][:, a:b].argmax(axis=1), pred.argmax(axis=1)))
  print(accuracy)
  f1_scores.append(f1_score(Y[test][:, a:b].argmax(axis=1), pred.argmax(axis=1), average='macro'))
Example #9
y_true = list(target_list)
print(y_true)


model.load_weights('/content/gdrive/My Drive/Syrus/weights_inception_full/cp-0030.ckpt')
path_test='/content/train_test/test'
for names in sorted(os.listdir(path_test)):
  print(names)
  img = image.load_img(os.path.join(path_test,names), target_size=(224, 224))
  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)
  #print('Predicted:')
  #print(preds)
  y_classes = preds.argmax(axis=-1)
  print(y_classes)
 
  final.append(y_classes[0])
print(final)

score = accuracy_score(y_true, final)
print(score)

!pip install h5py
!pip install tensorflowjs

model.save('inception_v3_1.h5')

!mkdir model100
!tensorflowjs_converter --input_format keras inception_v3_1.h5 model100/
Example #10
print("End training model")
end = time.time()
print(time.strftime("%H:%M:%S", time.gmtime(end - start)))

evaluateModelPerformance()

cm_plot_labels = []

for x in test_batches.class_indices:
    cm_plot_labels.append(x)

predictions = model.predict(test_batches, steps=5, verbose=0)

test_labels = test_batches.classes

cm = confusion_matrix(test_labels, predictions.argmax(1))

plot_confusion_matrix(cm, cm_plot_labels)

# create the Core ML model (image_scale=1/255 presumably mirrors the 1/255
# input rescaling applied during training)
coreml_model = coremltools.converters.keras.convert(
    model,
    input_names='input_9',
    image_input_names='input_9',
    output_names='Identity',
    class_labels=cm_plot_labels,
    image_scale=1 / 255.0)

coreml_model.save('modelSzczecin.mlmodel')

for index, layer in enumerate(mobile.layers):