from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, TimeDistributed
from tensorflow.keras.models import Model

# DEFAULT_INPUT_LENGTH and DEFAULT_OUTPUT_LENGTH are assumed to be defined elsewhere in the project.
def create_model(input_dict_size, output_dict_size,
                 input_length=DEFAULT_INPUT_LENGTH, output_length=DEFAULT_OUTPUT_LENGTH):
    # Encoder and decoder take sequences of token indices
    encoder_input = Input(shape=(input_length,))
    decoder_input = Input(shape=(output_length,))

    # Encoder: embed the source sequence and keep only the final LSTM output
    encoder = Embedding(input_dict_size, 64, input_length=input_length, mask_zero=True)(encoder_input)
    encoder = LSTM(64, return_sequences=False)(encoder)

    # Decoder: embed the target sequence and initialise the decoder LSTM's
    # hidden and cell states with the encoder's final output
    decoder = Embedding(output_dict_size, 64, input_length=output_length, mask_zero=True)(decoder_input)
    decoder = LSTM(64, return_sequences=True)(decoder, initial_state=[encoder, encoder])

    # Per-timestep softmax over the output vocabulary
    decoder = TimeDistributed(Dense(output_dict_size, activation='softmax'))(decoder)

    model = Model(inputs=[encoder_input, decoder_input], outputs=[decoder])
    model.compile(optimizer='adam', loss='categorical_crossentropy')
    return model
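# A minimal usage sketch for create_model (the vocabulary sizes, sequence lengths and
# random arrays below are illustrative only, not part of the original code). It shows
# the teacher-forcing setup the two-input model expects: the decoder input is the
# target sequence, and the training target is its one-hot encoding.
import numpy as np

seq2seq = create_model(input_dict_size=100, output_dict_size=120,
                       input_length=20, output_length=20)

enc_in = np.random.randint(1, 100, size=(32, 20))            # source token indices
dec_in = np.random.randint(1, 120, size=(32, 20))             # target token indices (decoder input)
dec_target = np.eye(120)[np.random.randint(1, 120, size=(32, 20))]  # one-hot targets

seq2seq.fit([enc_in, dec_in], dec_target, batch_size=8, epochs=1)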
import numpy as np
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.applications import VGG19
from tensorflow.keras.optimizers import Adam

# num_classes, train_X, train_Y, test_X and test_Y are assumed to be defined earlier in the script.
image_input = Input(shape=(224, 224, 3))
model = VGG19(input_tensor=image_input, include_top=True, weights=None)

# Take the last fully connected layer before the original classifier head
last_layer = model.get_layer('fc2').output          # VGG
# last_layer = model.get_layer('fc1000').output     # RES
# last_layer = model.get_layer('predictions').output
last_layer = Dropout(0.5)(last_layer)
out = Dense(num_classes, activation="softmax", name="output")(last_layer)
model = Model(image_input, out)

# Freeze everything except the new classification layer
for layer in model.layers[:-1]:
    layer.trainable = False
model.summary()

model.compile(loss='sparse_categorical_crossentropy',
              optimizer=Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
              metrics=['accuracy'])

hist = model.fit(train_X, train_Y, batch_size=16, epochs=30, verbose=1,
                 validation_data=(test_X, test_Y))

(loss, accuracy) = model.evaluate(test_X, test_Y, batch_size=8, verbose=1)
prediction = model.predict(test_X)
predict = np.argmax(prediction, axis=1)

print("Save model? y/n")
a = input()
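# Hypothetical continuation of the save prompt above (the file name is illustrative,
# not from the original script): persist the model only if the user answers 'y'.
if a.strip().lower() == 'y':
    model.save('vgg19_transfer.h5')   # architecture + weights in one HDF5 file
    print("Model saved to vgg19_transfer.h5")
else:
    print("Model not saved")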
import tensorflow as tf
from tensorflow.keras import preprocessing
from tensorflow.keras.layers import (Input, Dense, Conv2D, MaxPool2D, Dropout, ReLU,
                                     BatchNormalization, concatenate, Flatten,
                                     GlobalAveragePooling2D)
from tensorflow.keras.models import Model

# This returns a tensor
inputs = Input(shape=(32, 32, 3))
pre_net = Conv2D(64, (7, 7), strides=(2, 2))  # defined here but not used in the model below

# A layer instance is callable on a tensor, and returns a tensor
x = Flatten()(inputs)  # flatten the image so the Dense stack produces one prediction vector per sample
x = Dense(64, activation='relu')(x)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)

# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
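# A minimal training sketch on random CIFAR-10-shaped data (the data here is
# illustrative only; the original snippet stops after model.summary()).
import numpy as np

x_train = np.random.rand(64, 32, 32, 3).astype('float32')
y_train = tf.keras.utils.to_categorical(np.random.randint(0, 10, size=(64,)), num_classes=10)

model.fit(x_train, y_train, batch_size=16, epochs=1, verbose=1)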
import time

import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.applications import VGG16

# read_dataset, labelling_outputs and labelling_mammo are project-specific helpers defined elsewhere.
def main(train_epochs):
    print('Hello Lenin Welcome to Transfer Learning with VGG16')

    # Reading images to form X vector
    labels_name = {'benign': 0, 'malignant': 1}
    img_data, img_labels = read_dataset('/data_roi_single/train', labels_dict=labels_name)
    print(np.unique(img_labels, return_counts=True))

    # categories_names = ['benign', 'malignant']
    num_classes = 2
    # labels = labelling_outputs(num_classes, img_data.shape[0])
    # labels = labelling_mammo(num_classes, img_data.shape[0])

    # Converting class labels to one-hot encoding
    y_one_hot = to_categorical(img_labels, num_classes)

    # Shuffle data
    x, y = shuffle(img_data, y_one_hot, random_state=2)

    # Dataset split
    xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.2, random_state=2)

    #########################################################################
    # Custom_vgg_model_1
    # Training the classifier alone
    image_input = Input(shape=(224, 224, 3))
    model = VGG16(input_tensor=image_input, include_top=True, weights='imagenet')
    model.summary()

    last_layer = model.get_layer('fc2').output
    out = Dense(num_classes, activation='sigmoid', name='vgg16TL')(last_layer)  # sigmoid instead of softmax
    custom_vgg_model = Model(image_input, out)
    custom_vgg_model.summary()

    # Until this point the custom model is trainable at all layers.
    # Now we freeze all the layers up to the last one.
    for layer in custom_vgg_model.layers[:-1]:
        layer.trainable = False
    custom_vgg_model.summary()
    # custom_vgg_model.layers[3].trainable
    # custom_vgg_model.layers[-1].trainable

    # Model compilation
    custom_vgg_model.compile(loss='binary_crossentropy',
                             optimizer='rmsprop',
                             metrics=['accuracy'])  # binary cross-entropy instead of categorical

    print('Transfer Learning Training...')
    t = time.time()
    num_of_epochs = train_epochs  # the user defines the number of epochs
    hist = custom_vgg_model.fit(xtrain, ytrain, batch_size=64, epochs=num_of_epochs,
                                verbose=1, validation_data=(xtest, ytest))
    print('Training time: %s' % (time.time() - t))

    # Model saving parameters
    custom_vgg_model.save('vgg16_tf_bc.h5')

    print('Evaluation...')
    (loss, accuracy) = custom_vgg_model.evaluate(xtest, ytest, batch_size=10, verbose=1)
    print("[INFO] loss={:.4f}, accuracy: {:.4f}%".format(loss, accuracy * 100))
    print("Finished")

    # Model Training Graphics
    # Visualizing losses and accuracy
    train_loss = hist.history['loss']
    val_loss = hist.history['val_loss']
    train_acc = hist.history['acc']
    val_acc = hist.history['val_acc']
    xc = range(num_of_epochs)  # this range is tied to the number of epochs

    plt.figure(1, figsize=(7, 5))
    plt.plot(xc, train_loss)
    plt.plot(xc, val_loss)
    plt.xlabel('num of epochs')
    plt.ylabel('loss')
    plt.title('train_loss vs val_loss')
    plt.grid(True)
    plt.legend(['train', 'val'])
    plt.style.use(['classic'])  # check which other styles are available
    plt.savefig('vgg16_train_val_loss.jpg')

    plt.figure(2, figsize=(7, 5))
    plt.plot(xc, train_acc)
    plt.plot(xc, val_acc)
    plt.xlabel('num of epochs')
    plt.ylabel('accuracy')
    plt.title('train_accuracy vs val_accuracy')
    plt.grid(True)
    plt.legend(['train', 'val'], loc=4)
    plt.style.use(['classic'])  # check which other styles are available
    plt.savefig('vgg16_train_val_acc.jpg')
    plt.show()
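# Hypothetical entry point (the argument parsing below is illustrative; the original
# file only defines main(train_epochs)).
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='VGG16 transfer learning for mammogram ROIs')
    parser.add_argument('--epochs', type=int, default=10, help='number of training epochs')
    args = parser.parse_args()
    main(args.epochs)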