# Imports needed by the snippets in this file (classic Keras / TF1-era API,
# matching the `lr=` / `decay=` optimizer arguments used below).
import time
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Model
from keras.layers import Input, Dense
from keras.optimizers import Adam
from keras.applications.vgg16 import VGG16
from keras.utils import to_categorical
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split

# Snippet: train only a new classification head on a pretrained backbone.
# Assumes `image_input`, `last_layer`, `num_classes`, and the
# (train_X, train_Y) / (test_X, test_Y) arrays are defined earlier.
out = Dense(num_classes, activation="softmax", name="output")(last_layer)
model = Model(image_input, out)

# Freeze every layer except the new output layer
for layer in model.layers[:-1]:
    layer.trainable = False
model.summary()

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999,
                             epsilon=1e-08, decay=0.0),  # `lr` is `learning_rate` in newer Keras
              metrics=['accuracy'])

hist = model.fit(train_X, train_Y, batch_size=16, epochs=30, verbose=1,
                 validation_data=(test_X, test_Y))

(loss, accuracy) = model.evaluate(test_X, test_Y, batch_size=8, verbose=1)
prediction = model.predict(test_X)
predict = np.argmax(prediction, axis=1)

print("Save Model? y/n")
a = input()
if a == "y":
    print("Saving Model")
    model.save("RESRetry14.h5", overwrite=True, include_optimizer=True)
else:
    print("Over")
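# Optional follow-up (a minimal sketch, not part of the original script): the
# `predict` vector above is computed but never used. Since
# sparse_categorical_crossentropy expects integer labels, test_Y can be compared
# directly against the argmax predictions to get per-class metrics:
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(test_Y, predict, target_names=['benign', 'malignant']))
print(confusion_matrix(test_Y, predict))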
def main(train_epochs):
    print('Hello Lenin Welcome to Transfer Learning with VGG16')

    # Read images to form the X vector
    labels_name = {'benign': 0, 'malignant': 1}
    img_data, img_labels = read_dataset('/data_roi_single/train', labels_dict=labels_name)
    print(np.unique(img_labels, return_counts=True))

    # categories_names = ['benign', 'malignant']
    num_classes = 2
    # labels = labelling_outputs(num_classes, img_data.shape[0])
    # labels = labelling_mammo(num_classes, img_data.shape[0])

    # Convert class labels to one-hot encoding
    y_one_hot = to_categorical(img_labels, num_classes)

    # Shuffle the data
    x, y = shuffle(img_data, y_one_hot, random_state=2)

    # Dataset split
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=2)

    #####################################################################
    # custom_vgg_model_1
    # Training the classifier alone
    image_input = Input(shape=(224, 224, 3))
    model = VGG16(input_tensor=image_input, include_top=True, weights='imagenet')
    model.summary()

    last_layer = model.get_layer('fc2').output
    out = Dense(num_classes, activation='sigmoid', name='vgg16TL')(last_layer)  # sigmoid instead of softmax
    custom_vgg_model = Model(image_input, out)
    custom_vgg_model.summary()

    # Up to this point every layer of the custom model is trainable.
    # Now freeze all layers except the last one (the new classifier head).
    for layer in custom_vgg_model.layers[:-1]:
        layer.trainable = False
    custom_vgg_model.summary()
    # custom_vgg_model.layers[3].trainable
    # custom_vgg_model.layers[-1].trainable

    # Model compilation
    custom_vgg_model.compile(loss='binary_crossentropy',
                             optimizer='rmsprop',
                             metrics=['accuracy'])  # binary cross-entropy instead of categorical

    print('Transfer Learning Training...')
    t = time.time()
    num_of_epochs = train_epochs  # user-defined number of epochs
    hist = custom_vgg_model.fit(xtrain, ytrain, batch_size=64, epochs=num_of_epochs,
                                verbose=1, validation_data=(xtest, ytest))
    print('Training time: %s' % (time.time() - t))

    # Save the trained model
    custom_vgg_model.save('vgg16_tf_bc.h5')

    print('Evaluation...')
    (loss, accuracy) = custom_vgg_model.evaluate(xtest, ytest, batch_size=10, verbose=1)
    print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
    print("Finished")

    # Model training graphics: visualize losses and accuracy
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']    # use 'accuracy' on newer Keras versions
    val_acc = hist.history['val_acc']  # use 'val_accuracy' on newer Keras versions
    xc = range(num_of_epochs)  # this value is tied to the number of epochs

    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    plt.style.use(['classic'])  # check what other styles are available
    plt.savefig('vgg16_train_val_loss.jpg')

    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of epochs')
    plt.ylabel('accuracy')
    plt.title('train_accuracy vs val_accuracy')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    plt.style.use(['classic'])
    plt.savefig('vgg16_train_val_acc.jpg')
    plt.show()
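# `read_dataset` is not defined in this file. Below is a minimal sketch of what
# it is assumed to do: walk one subfolder per class under the data directory,
# resize each image to the 224x224 input VGG16 expects, and return the arrays.
# The loading/preprocessing details here are assumptions, not the original helper.
import os
import cv2

def read_dataset(data_dir, labels_dict):
    images, labels = [], []
    for class_name, class_idx in labels_dict.items():
        class_dir = os.path.join(data_dir, class_name)
        for fname in os.listdir(class_dir):
            img = cv2.imread(os.path.join(class_dir, fname))
            if img is None:
                continue  # skip unreadable / non-image files
            img = cv2.resize(img, (224, 224))
            images.append(img)
            labels.append(class_idx)
    return np.array(images, dtype='float32'), np.array(labels)


# Entry point: run the transfer-learning pipeline for a chosen number of epochs.
if __name__ == '__main__':
    main(train_epochs=30)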