# Script fragment: load pretrained ResNet50 and NASNetMobile feature
# extractors and begin collecting cat/dog image data for transfer learning.
import numpy as np
from matplotlib import pyplot as plt

# Matplotlib setup: render minus signs correctly, use SimHei (needed for
# CJK axis labels), and thicker default plot lines.
plt.rcParams['axes.unicode_minus'] = False
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['lines.linewidth'] = 3

print('The progress of the CNN-Net:')
print(
    'AlexNet --> VGGNet --> InceptionNet/ResNet --> InceptionResNet --> NASNet --> MobileNet'
)
print('-----------------------------------------')
print('ResNet:')

# ImageNet-pretrained backbones without classification heads, used as
# fixed feature extractors.
from keras.applications.resnet import preprocess_input, ResNet50
model_Res = ResNet50(include_top=False, weights='imagenet')

from keras.applications.nasnet import NASNetMobile
from keras.applications.nasnet import preprocess_input as p1  # NASNet-specific preprocessing
model_NasMobile = NASNetMobile(include_top=False, weights='imagenet')

from keras.preprocessing.image import load_img, img_to_array
import os

# Directory of cat images; accumulator lists presumably hold extracted
# features for each backbone (filled further below).
path_cat = '/Users/mingyuexu/PycharmProjects/vgg_data_cat_c'
img_name_cat = os.listdir(path_cat)
data_cat = []
data_dog = []
data_cat1 = []
data_dog1 = []

# NOTE(review): loop body continues beyond this chunk; only the load of each
# image, resized to the 224x224 input both backbones expect, is visible here.
for item in img_name_cat:
    img = load_img(f'/Users/mingyuexu/PycharmProjects/vgg_data_cat_c/{item}',
                   target_size=(224, 224))
# Script fragment (starts mid-file): one-hot encode the labels, then either
# resume training from a saved checkpoint or build a fresh ResNet model.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

if args.resume:
    # Resume path: reload the previously saved model; signal the scheduler
    # node before and after the (potentially slow) model load.
    print('resume from checkpoint')
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    model = models.Sequential()
    # Select backbone depth from the model-name string; weights=None means
    # training from scratch on 32x32 inputs (CIFAR-sized).
    if '50' in args_model:
        base_model = ResNet50(weights=None,
                              include_top=False,
                              input_shape=(32, 32, 3),
                              pooling=None)
    elif '101' in args_model:
        base_model = ResNet101(weights=None,
                               include_top=False,
                               input_shape=(32, 32, 3),
                               pooling=None)
    elif '152' in args_model:
        base_model = ResNet152(weights=None,
                               include_top=False,
                               input_shape=(32, 32, 3),
                               pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    #model.add(layers.UpSampling2D((2,2)))
    model.add(base_model)
    model.add(layers.Flatten())
    #model.add(layers.BatchNormalization())
def Build_Model_ResNet(px_train , pSessionParameters , pTrainingParameters ):
    '''
    Builds a new ResNet model.

    Input parameters:
        px_train: training data; px_train.shape[1:] sets the model input shape.
        pSessionParameters: dict; uses ModelBuildParameters keys
            NoClasses, Activation, IncludeTop, Model (e.g. 'ResNet50'),
            and Version (1 for ResNet, anything else for ResNet-V2).
        pTrainingParameters: unused here; kept for interface compatibility.

    Return parameters:
        model: keras Sequential wrapping the selected ResNet conv base.
        With IncludeTop the backbone's own classifier is used as-is; without
        it, Build_Model_AddLayers() layers plus a Dense softmax head are added.
    '''
    from keras.applications.resnet import ResNet50, ResNet101, ResNet152
    from keras.applications.resnet_v2 import ResNet50V2, ResNet101V2, ResNet152V2

    NoClasses = pSessionParameters['ModelBuildParameters']['NoClasses']
    Activation = pSessionParameters['ModelBuildParameters']['Activation']
    IncludeTopFlag = pSessionParameters['ModelBuildParameters']['IncludeTop']
    Model = pSessionParameters['ModelBuildParameters']['Model']
    Version = pSessionParameters['ModelBuildParameters']['Version']
    # NOTE: the unused read of pSessionParameters['BatchNorm'] was removed.

    # One dispatch table replaces four duplicated if/elif ladders
    # (Version 1/2 x include_top True/False).  Tuple order mirrors the
    # original substring checks ('ResNet50' is tested first).
    if Version == 1:
        backbones = (('ResNet50', ResNet50),
                     ('ResNet101', ResNet101),
                     ('ResNet152', ResNet152))
    else:  # Version 2
        backbones = (('ResNet50', ResNet50V2),
                     ('ResNet101', ResNet101V2),
                     ('ResNet152', ResNet152V2))

    for key, Constructor in backbones:
        if key in Model:
            # weights=None: train from scratch.  pooling/classes are passed
            # exactly as the original did; keras ignores whichever does not
            # apply for the chosen include_top setting.
            conv_base = Constructor(input_shape=(px_train.shape[1:]),
                                    weights=None,
                                    include_top=IncludeTopFlag,
                                    classes=NoClasses,
                                    pooling='avg')
            break

    model = models.Sequential()
    model.add(conv_base)

    if not IncludeTopFlag:
        # Add Dense Layers and Dropout and BatchNorm layers based on
        # ModelBuildParameters, then the classification head.
        model = Build_Model_AddLayers(model , pSessionParameters )
        model.add(layers.Dense(NoClasses, activation=Activation, name='dense_class'))

    return model
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 00:55:01 2020

@author: ubuntu

Extract 2048-d ResNet50 'avg_pool' features for every image under ./Dataset/.
"""
from keras.applications.resnet import ResNet50
from keras.models import Model
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import pandas as pd

# Full ImageNet ResNet50, then truncate at the global-average-pool layer so
# the model emits a 2048-d feature vector per image instead of class scores.
model = ResNet50(weights='imagenet', include_top=True)
model2 = Model(inputs=model.input, outputs=model.get_layer('avg_pool').output)
#model2.summary()

# NOTE(review): ResNet50 was trained with caffe-style preprocess_input, not a
# plain 1/255 rescale — confirm this preprocessing mismatch is intentional.
test_datagen = ImageDataGenerator(rescale=1. / 255)
test_generator = test_datagen.flow_from_directory(directory=r"./Dataset/",
                                                  target_size=(224, 224),
                                                  color_mode="rgb",
                                                  batch_size=1,
                                                  class_mode=None,
                                                  shuffle=False,
                                                  seed=42)

# FIX: predict_generator is deprecated; Model.predict accepts generators
# directly (steps=15851 with batch_size=1 covers the whole dataset).
predict = model2.predict(test_generator, steps=15851, verbose=1)

# ----------
# create df
def cnn_model(model_name, img_size):
    """
    Build a transfer-learning classifier on a pretrained ImageNet backbone.

    Args:
        model_name: backbone selector -- "xception", "iv3", "irv2",
            "resnet", "nasnet", "ef0" or "ef5".
        img_size: side length; inputs are (img_size, img_size, 3).

    Returns:
        Compiled keras Model: frozen backbone + GAP -> Dense(512, relu)
        -> Dropout(0.4) -> Dense(5, softmax) head, Nadam optimizer,
        categorical cross-entropy loss.
    """
    input_size = (img_size, img_size, 3)
    if model_name == "xception":
        print("Loading Xception wts...")
        baseModel = Xception(weights="imagenet",
                             include_top=False,
                             input_shape=(img_size, img_size, 3))
    elif model_name == "iv3":
        baseModel = InceptionV3(weights="imagenet",
                                include_top=False,
                                input_shape=(img_size, img_size, 3))
    elif model_name == "irv2":
        baseModel = InceptionResNetV2(weights="imagenet",
                                      include_top=False,
                                      input_shape=(img_size, img_size, 3))
    elif model_name == "resnet":
        baseModel = ResNet50(weights="imagenet",
                             include_top=False,
                             input_shape=(img_size, img_size, 3))
    elif model_name == "nasnet":
        baseModel = NASNetLarge(weights="imagenet",
                                include_top=False,
                                input_shape=(img_size, img_size, 3))
    elif model_name == "ef0":
        # FIX: input_size was passed positionally, landing in EfficientNet's
        # first parameter (include_top) and clashing with the include_top
        # keyword; it must be passed as input_shape.
        baseModel = EfficientNetB0(weights="imagenet",
                                   include_top=False,
                                   input_shape=input_size)
    elif model_name == "ef5":
        # FIX: same positional-argument bug as "ef0".
        baseModel = EfficientNetB5(weights="imagenet",
                                   include_top=False,
                                   input_shape=input_size)

    # Classification head on top of the backbone.
    headModel = baseModel.output
    headModel = GlobalAveragePooling2D()(headModel)
    headModel = Dense(512, activation="relu",
                      kernel_initializer="he_uniform")(headModel)
    headModel = Dropout(0.4)(headModel)
    # headModel = Dense(512, activation="relu", kernel_initializer="he_uniform")(
    #     headModel
    # )
    # headModel = Dropout(0.5)(headModel)
    predictions = Dense(5, activation="softmax",
                        kernel_initializer="he_uniform")(headModel)
    model = Model(inputs=baseModel.input, outputs=predictions)

    # Freeze every backbone layer; only the new head trains.
    for layer in baseModel.layers:
        layer.trainable = False

    optimizer = Nadam(lr=0.002,
                      beta_1=0.9,
                      beta_2=0.999,
                      epsilon=1e-08,
                      schedule_decay=0.004)
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["accuracy"])
    return model
y_col=y_name,
class_mode="categorical",
target_size=(224, 224),
batch_size=8,
validate_filenames=False)

# Validation generator: second half of the dataframe.
# NOTE(review): DataFrame.ix is removed in modern pandas; .iloc is the
# positional equivalent here — confirm the pinned pandas version.
valid_generator = datagen.flow_from_dataframe(
    dataframe=df.ix[n // 2:],
    directory=None,
    x_col="Biopsy",
    y_col=y_name,
    class_mode="categorical",
    target_size=(224, 224),
    batch_size=8)  #, validate_filenames=False)

# ImageNet ResNet50 backbone, headless, with global-average-pooled output.
basemodel = ResNet50(include_top=False,
                     weights='imagenet',
                     pooling='avg',
                     classes=2)

# construct the head of the model that will be placed on top of the
# the base model
headModel = Dense(512, activation="relu")(basemodel.output)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)
model = Model(inputs=basemodel.input, outputs=headModel)

model.compile(optimizers.rmsprop(lr=0.0001),
              loss="categorical_crossentropy",
              metrics=["accuracy"])

# One full pass over each generator per epoch.
STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size
model.fit_generator(generator=train_generator,
                    steps_per_epoch=STEP_SIZE_TRAIN,
def train(dataset, architecture, task_name): ROOT_MODELS = '/home/dembanakh/.ml-manager/tasks-weights/' ROOT_DATASETS = '/home/dembanakh/.ml-manager/datasets/' if dataset == 'IMAGENET': if architecture == 'VGG16': from keras.applications.vgg16 import VGG16 model = VGG16(weights='imagenet') elif architecture == 'VGG19': from keras.applications.vgg19 import VGG19 model = VGG19(weights='imagenet') elif architecture == 'MobileNet': from keras.applications.mobilenet import MobileNet model = MobileNet(weights='imagenet') elif architecture == 'ResNet': from keras.applications.resnet import ResNet50, preprocess_input model = ResNet50(weights='imagenet') elif architecture == 'DenseNet': from keras.applications.densenet import DenseNet121, preprocess_input model = DenseNet121(weights='imagenet') else: return 0 model.compile(optimizer='adam', metrics=['accuracy'], loss='sparse_categorical_crossentropy') model.save(ROOT_MODELS + task_name + '.h5') else: input_shape = (224, 224, 3) batch_size = 1 # subject to change, but Azure server has little RAM import os import numpy as np from keras.preprocessing import image try: samples = [i for i in os.listdir(dataset + '/samples')] except OSError: print 'There is no such directory', dataset + '/samples' return 0 X = np.zeros((len(samples), input_shape[0], input_shape[1], input_shape[2])) # maybe depends on architecture y = np.zeros((len(samples), )) if architecture == 'VGG16': from keras.applications.vgg16 import VGG16, preprocess_input model = VGG16() for i in range(X.shape[0]): X[i] = preprocess_input(X[i]) elif architecture == 'VGG19': from keras.applications.vgg19 import VGG19, preprocess_input model = VGG19() for i in range(X.shape[0]): X[i] = preprocess_input(X[i]) elif architecture == 'MobileNet': from keras.applications.mobilenet import MobileNet, preprocess_input model = MobileNet() for i in range(X.shape[0]): X[i] = preprocess_input(X[i]) elif architecture == 'ResNet': from keras.applications.resnet import ResNet50, 
preprocess_input model = ResNet50() for i in range(X.shape[0]): X[i] = preprocess_input(X[i]) elif architecture == 'DenseNet': from keras.applications.densenet import DenseNet121, preprocess_input model = DenseNet121() for i in range(X.shape[0]): X[i] = preprocess_input(X[i]) else: return 0 for i, sample in enumerate(samples): try: img = image.load_img(dataset + '/samples/' + sample, target_size=input_shape) except IOError: print 'Failed to open file', dataset + '/samples/' + sample return 0 try: f_lbl = open( dataset + '/labels/' + sample.split('.')[0] + '.txt', 'r') except IOError: print 'Failed to open file', dataset + '/labels/' + sample.split( '.')[0] + '.txt' return 0 try: y[i] = int(f_lbl.read()) except ValueError: print 'File', dataset + '/labels/' + sample.split( '.')[0] + '.txt', 'doesn\'t contain integer' return 0 model.compile(optimizer='adam', metrics=['accuracy'], loss='sparse_categorical_crossentropy') model.fit(X, y, batch_size=batch_size) model.save(ROOT_MODELS + task_name + '.h5') return 1
type=int,
default=64,
help=
'Training batch size (larger batches are usually more efficient on GPUs)',
)
    # End of the enclosing parse_args() definition (starts before this chunk).
    flags = parser.parse_args()
    return flags


flags = parse_args()
num_classes = 9  # number of classes
epochs = 50
model = Sequential()
resNet = ResNet50(include_top=False, pooling='avg',
                  weights='imagenet')  # pretrained model
# froze layers in resnet
# If layers_fine_tune == N (> 0), leave the LAST N backbone layers trainable;
# otherwise freeze the whole backbone.
if flags.layers_fine_tune != 0:
    layers_fine_tune = -flags.layers_fine_tune
    for layer in resNet.layers[:layers_fine_tune]:
        layer.trainable = False
else:
    for layer in resNet.layers:
        layer.trainable = False
model.add(resNet)
model.add(Dense(num_classes, activation='softmax'))  # Classification layer
# Compiling the model
adam = Adam(lr=flags.learning_rate)
model.compile(optimizer=adam,
# Script fragment: finish the Inception run, then fine-tune a ResNet50.
print(f"End time is: {datetime.now()}")
Inception_test_predict = model.predict(sub_test_images)

# ResNet
# talos runs indicated 0.01 as a good learning rate
# divide by 100 to get actual learning rate
architecture = 'ResNet'  # change this!!!!!!
print('architecture',architecture)
random.seed(23)
lr = 0.0001
optimizer = SGD  # optimizer class; instantiated with lr at compile time
base_model = ResNet50(weights='imagenet',
                      include_top=False)  # change this chunk!!!!!
# New head: GAP -> Dense(1024, relu) -> softmax over num_classes.
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(num_classes, activation='softmax')(x)
# Stop after 10 epochs without val_loss improvement, restoring best weights;
# checkpoint the best model to disk.
es = EarlyStopping(monitor='val_loss',
                   min_delta=0,
                   patience=10,
                   verbose=0,
                   mode='min',
                   baseline=None,
                   restore_best_weights=True)
mc = ModelCheckpoint(model_pathname + '/exp_' + exp_id + '_' + architecture +
                     '_best_model.h5',
                     monitor='val_loss',
                     mode='min',
                     verbose=1,
                     save_best_only=True)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer=optimizer(lr=lr), loss=loss_fx, metrics=metrics)
History = model.fit_generator(
    imageLoader(sub_train_images, to_categorical(sub_train_labels), batch_size),
    steps_per_epoch=sub_train_images.shape[0] // batch_size,
def chosen_model(choice):
    """
    Build and train a transfer-learning model for the backbone selected by
    `choice` (1-18); choice 19 instead loads an existing model via
    model_exist().

    The user is prompted whether to use pretrained imagenet weights.  The
    chosen backbone (include_top=False) gets a GlobalAveragePooling ->
    Dropout(0.4) -> Dense(softmax over the data/train class folders) head,
    the backbone is frozen, the model is compiled with rmsprop +
    categorical cross-entropy, and handed to training().
    """
    global base_model
    if choice == 19:
        model_exist()
    else:
        # Ask until the user gives a valid y/n answer.
        while (1):
            print()
            print(
                'Transfer Learning? - Will use pre-trained model with imagenet weights'
            )
            print('y')
            print('n')
            weights_wanted = input()
            if weights_wanted.upper() != 'Y' and weights_wanted.upper() != 'N':
                print('ERROR: Please enter a valid choice')
            else:
                break

        # Dispatch table replaces 18 near-identical if-blocks; each entry is
        # (display name, constructor).
        backbones = {
            1: ('Xception', Xception),
            2: ('VGG16', VGG16),
            3: ('VGG19', VGG19),
            4: ('ResNet50', ResNet50),
            5: ('ResNet101', ResNet101),
            6: ('ResNet152', ResNet152),
            7: ('ResNet50V2', ResNet50V2),
            8: ('ResNet101V2', ResNet101V2),
            9: ('ResNet152V2', ResNet152V2),
            10: ('InceptionV3', InceptionV3),
            11: ('InceptionResNetV2', InceptionResNetV2),
            12: ('MobileNet', MobileNet),
            13: ('MobileNetV2', MobileNetV2),
            14: ('DenseNet121', DenseNet121),
            15: ('DenseNet169', DenseNet169),
            16: ('DenseNet201', DenseNet201),
            17: ('NASNetLarge', NASNetLarge),
            18: ('NASNetMobile', NASNetMobile),
        }
        if choice in backbones:
            name, constructor = backbones[choice]
            print('Selected Model = {}'.format(name))
            weights = 'imagenet' if weights_wanted.upper() == 'Y' else None
            base_model = constructor(weights=weights, include_top=False)

        # One class per sub-directory of the training data.
        CLASSES = len(os.listdir('data/train'))
        print('Number of Classes = {}'.format(CLASSES))

        # Classification head on top of the backbone.
        x = base_model.output
        x = GlobalAveragePooling2D(name='avg_pool')(x)
        x = Dropout(0.4)(x)
        predictions = Dense(CLASSES, activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=predictions)

        # Freeze the backbone; only the new head is trainable.
        for layer in base_model.layers:
            layer.trainable = False
        model.compile(optimizer='rmsprop',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        training(model)
valid_path = '../large_files/fruits-360-small/Validation' # useful for getting number of files image_files = glob(train_path + '/*/*.jp*g') valid_image_files = glob(valid_path + '/*/*.jp*g') # useful for getting number of classes folders = glob(train_path + '/*') # look at an image for fun plt.imshow(image.load_img(np.random.choice(image_files))) plt.show() # add preprocessing layer to the front of VGG res = ResNet50(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False) # don't train existing weights for layer in res.layers: layer.trainable = False # our layers - you can add more if you want x = Flatten()(res.output) # x = Dense(1000, activation='relu')(x) prediction = Dense(len(folders), activation='softmax')(x) # create a model object model = Model(inputs=res.input, outputs=prediction) # view the structure of the model
# Notebook-exported fragment: load a trained captioning model and build a
# ResNet50 image encoder for it.
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Input, Dense, Dropout, Embedding, LSTM
from keras.layers.merge import add

# In[3]:
model = load_model("model_weights/model_7.h5")

# In[6]:
from PIL import Image

# In[7]:
# Full ImageNet ResNet50 classifier on 224x224 inputs.
model_temp = ResNet50(weights="imagenet", input_shape=(224, 224, 3))

# In[8]:
# Truncate at the penultimate layer so the encoder outputs an image
# embedding instead of class probabilities.
model_resnet = Model(model_temp.input, model_temp.layers[-2].output)

# In[9]:


def preprocess_img(img):
    # Load from path/file-like, resize to ResNet50's 224x224 input, add a
    # batch axis, and apply ImageNet preprocessing.
    img = Image.open(img)
    img = img.resize((224, 224))
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)  #Normalisation
    return img
def resNet(xtrain, ytrain, xtest, ytest):
    """
    Fine-tune an ImageNet ResNet50 on a 100-class dataset, then save the
    model and accuracy/loss plots.  Inputs are upsampled x8 (32 -> 256 per
    the comment below) to match the 256x256 backbone input.
    """
    batch = 32
    # Augmentation: custom cutout preprocessing, flips, shifts, and
    # per-sample mean centering (datagen.fit computes the required stats).
    datagen = ImageDataGenerator(preprocessing_function=cutout,
                                 horizontal_flip=True,
                                 width_shift_range=0.3,
                                 height_shift_range=0.3,
                                 samplewise_center=True)
    datagen.fit(xtrain)

    # get the base pre-trained model, trained on imagenet
    # don't include dense layers - want to modify
    # this is the image size we need (imagenet = 224x224, double 32 3 times ->256)
    resnet = ResNet50(weights='imagenet',
                      include_top=False,
                      input_shape=(256, 256, 3))
    # Freeze everything except BatchNormalization layers, which stay
    # trainable so their statistics adapt to the new data distribution.
    for layer in resnet.layers:
        if isinstance(layer, BatchNormalization):
            layer.trainable = True
        else:
            layer.trainable = False

    model = Sequential()
    # must upsample images to get them to appropriate size
    model.add(UpSampling2D())
    model.add(UpSampling2D())
    model.add(UpSampling2D())
    model.add(resnet)
    model.add(GlobalAveragePooling2D())
    model.add(Dense(256, activation='elu'))
    # model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Dense(100, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy'])

    # for testing purposes
    # num = 100
    # xtest = xtest[0:num, :, :, :]
    # ytest = ytest[0:num]
    #
    # xtrain = xtrain[0:num, :, :, :]
    # ytrain = ytrain[0:num]

    history = model.fit(datagen.flow(xtrain, ytrain, batch_size=batch),
                        epochs=4,
                        verbose=1,
                        validation_data=(xtest, ytest))
    score = model.evaluate(xtest, ytest, verbose=0)
    print(score[1])  # test accuracy
    model.save('preTrainedResNet50AllAugELU5.h5')

    # plot and save graphs of accuracy and loss
    plt.plot(history.history['accuracy'])
    plt.plot(history.history['val_accuracy'])
    plt.title('Model Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.plot()
    plt.savefig("Accuracy.png")
    plt.clf()
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Training', 'Validation'], loc='upper left')
    plt.plot()
    plt.savefig("Loss.png")