def extract_features(directory, is_attention=False):
    """Extract ResNet152 features for every photo in *directory*.

    With ``is_attention`` the network is cut at the last conv block and its
    activations reshaped into 49 context vectors of length 512 (for
    attention decoders); otherwise the penultimate layer's output is used
    as a single feature vector.

    Returns a dict mapping image id (filename without extension) to the
    predicted feature array.
    """
    # Build the feature-extraction model.
    model = ResNet152()
    model.layers.pop()  # drop the classification head (old-Keras pop idiom)
    if is_attention:
        # 49x512 context-vector map taken from the final conv layer.
        ctx = Reshape([49, 512])(model.layers[-4].output)
        model = Model(inputs=model.inputs, outputs=ctx)
    else:
        model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
    print(model.summary())

    features = dict()
    for name in progressbar(listdir(directory)):
        # ignore README
        if name == 'README.md':
            continue
        path = directory + '/' + name
        img = load_image(path)
        # extract features for this photo
        vec = model.predict(img, verbose=0)
        # image id is the filename stem
        img_id = name.split('.')[0]
        features[img_id] = vec
        print('>%s' % name)
    return features
def main():
    """Extract ResNet152 features for every .jpg/.png under ``opt.data_path``
    and write one ``<filename> <f0> <f1> ...`` line per image to
    ``opt.feature_path``.

    Fixes:
    - The original built the backbone as ``ResNet152(ResNet152(...))``,
      passing a Model instance where ``include_top`` is expected; the
      model is now constructed once.
    - The output file is opened with a context manager so it is closed
      (and flushed) even if feature extraction raises.
    """
    opt = parse_args()
    print(json.dumps(vars(opt), indent=2))
    input_path = opt.data_path
    print("Input path:" + str(input_path))
    output_path = opt.feature_path
    print("Output path:" + (output_path))
    # Load the pretrained ResNet152 backbone (headless).
    resnet152_model = ResNet152(include_top=False, weights='imagenet',
                                input_tensor=None, input_shape=None,
                                pooling=None)
    count = 0
    with open(output_path, "w") as features_filename:
        for filename in os.listdir(input_path):
            if filename.endswith(".jpg") or filename.endswith(".png"):
                image_path = os.path.join(input_path, filename)
                print("Image path:", image_path)
                # Standard ImageNet preprocessing: 224x224, batch dim, mean shift.
                img = image.load_img(image_path, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                features = resnet152_model.predict(x)[0]
                # One whitespace-separated line per image: name then feature values.
                output = filename + " " + " ".join(np.asarray(features, dtype=str)) + "\n"
                features_filename.write(output)
                count += 1
                print("Processing " + str(count) + ". Filename: " + filename)
def GenerateVisualFeatures(data_path, offset=0, limit=None, model=None):
    """Compute visual features for article images and write them back into
    each article's ``img/meta.json``.

    Processes article directories with indices [offset, offset + limit);
    when *limit* is falsy it defaults to "everything after offset".  A
    headless ImageNet ResNet152 is built when *model* is not supplied.
    Images that already carry features, or are not .jpg, are skipped;
    per-image failures are logged and do not abort the run.
    """
    article_paths = [join(data_path, f) for f in listdir(data_path)
                     if isdir(join(data_path, f))]
    if not limit:
        limit = len(article_paths) - offset
    if not model:
        model = ResNet152(weights='imagenet', include_top=False)

    for idx in range(offset, offset + limit):
        article = article_paths[idx]
        print(idx, article)
        meta_path = join(article, 'img/', 'meta.json')
        meta_arr = _getImagesMeta(meta_path)
        for meta in meta_arr:
            if 'features' in meta:
                continue  # already processed on a previous run
            if meta['filename'][-4:].lower() != ".jpg":
                continue  # only JPEG images are handled
            img_path = join(article, 'img/', meta['filename'])
            try:
                feats = _getImageFeatures(model, img_path)['features']
                meta['features'] = [str(f) for f in feats]
            except Exception as e:
                # Best-effort: log and move on to the next image.
                print("exception", str(e))
                print(img_path)
                continue
        _dump(meta_path, json.dumps({"img_meta": meta_arr}))
def evaluation(args):
    """Evaluate a pretrained ImageNet classifier on the ILSVRC2012 val split
    and print its top-1 accuracy.

    args.model selects the architecture.  Fix: an unsupported name now
    raises ValueError immediately (the original if/elif chain silently fell
    through and crashed later with NameError on ``model``).
    """
    path_img_val = '../datasets/ilsvrc2012/images/val/'
    path_val_info = '../datasets/ilsvrc2012/images/val.txt'
    # Map CLI names to keras.applications constructors.
    constructors = {
        'vgg16': VGG16,
        'resnet152': ResNet152,
        'resnet152v2': ResNet152V2,
        'inceptionresnetv2': InceptionResNetV2,
        'densenet201': DenseNet201,
        'nasnetlarge': NASNetLarge,
    }
    if args.model not in constructors:
        raise ValueError('Unsupported model: %s' % args.model)
    model = constructors[args.model](weights='imagenet')
    model.summary()

    name, label = load_header_imagenet(load_file(path_val_info))
    pred = list()
    for i, n in enumerate(name):
        x = preprocessing_imagenet(path_img_val + n, args)
        pred.append(np.argmax(model.predict(x), axis=1)[0])
        if i % 1000 == 0:
            print(n)  # progress marker every 1000 images
    correct = len([p for p, l in zip(pred, label) if p == l])
    print('Accuracy of the IMAGENET dataset using model %s: %.4f'
          % (args.model, correct / len(label)))
def set_base_model(self):
    """Instantiate ``self.base_model`` according to ``self.base_model_type``,
    using ImageNet weights when ``self.imageNetWeights`` is set.

    An unrecognized type leaves ``self.base_model`` untouched, matching the
    original fallthrough behavior.
    """
    if self.imageNetWeights:
        print("Using imagenet weights...")
        stringWeights = "imagenet"
    else:
        print("Using random initialization of the weights...")
        stringWeights = None

    # These backbones all take an explicit input_shape and max pooling.
    shaped_backbones = {
        "vgg16": keras.applications.vgg16.VGG16,
        "resnet50": ResNet50,
        "resnet101": ResNet101,
        "resnet152": ResNet152,
    }
    kind = self.base_model_type
    if kind in shaped_backbones:
        self.base_model = shaped_backbones[kind](
            include_top=False,
            weights=stringWeights,
            input_shape=self.input_shape,
            pooling='max')
    elif kind == "inceptionv3":
        # InceptionV3 is built without an explicit input_shape here.
        self.base_model = keras.applications.inception_v3.InceptionV3(
            include_top=False, weights=stringWeights, pooling='max')
def build_cnn_model():
    """Build a ResNet152 feature extractor cut at the 'avg_pool' layer,
    compiled with SGD (lr=0.01, momentum=0.9, decay=1e-4) and
    categorical cross-entropy.

    Fixes:
    - ``ResNet152((224,224,3), ...)`` passed the shape positionally into
      the ``include_top`` parameter; it is now passed as ``input_shape=``.
    - ``Model(input=..., output=...)`` used removed keyword names; the
      correct keywords are ``inputs=`` / ``outputs=``.
    """
    base_model = ResNet152(input_shape=(224, 224, 3), weights="imagenet")
    cnn_model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
    opt = optimizers.SGD(lr=0.01, decay=1e-4, momentum=0.9)
    cnn_model.compile(loss='categorical_crossentropy', optimizer=opt,
                      metrics=['accuracy'])
    return cnn_model
def make_encoder(input, name='resnet50', pretrained=True):
    """Return a headless backbone built on the given input tensor.

    *name* selects the architecture; *pretrained* toggles ImageNet weights.
    Raises Exception for an unknown encoder name.
    """
    weights = 'imagenet' if pretrained else None
    # Resolve the constructor; imports stay lazy so unused frameworks are
    # never loaded.
    if name == 'resnet18':
        from classification_models.keras import Classifiers
        constructor, _ = Classifiers.get('resnet18')
    elif name == 'resnet50':
        from keras.applications.resnet import ResNet50 as constructor
    elif name == 'resnet101':
        from keras.applications.resnet import ResNet101 as constructor
    elif name == 'resnet152':
        from keras.applications.resnet import ResNet152 as constructor
    elif name == 'vgg16':
        from keras.applications.vgg16 import VGG16 as constructor
    elif name == 'vgg19':
        from keras.applications.vgg19 import VGG19 as constructor
    else:
        raise Exception(f'unknown encoder {name}')
    return constructor(weights=weights, input_tensor=input, include_top=False)
def train(self):
    """Fine-tune a frozen ResNet152 backbone on images under
    ``self.train_path`` and save the resulting model to
    ``self.model_save_path``."""
    # All images are resized to 224x224 RGB.
    IMAGE_SIZE = [224, 224]
    # Pretrained backbone with the classifier head removed.
    backbone = ResNet152(input_shape=IMAGE_SIZE + [3], weights='imagenet',
                         include_top=False)
    # Keep the pretrained weights fixed.
    for layer in backbone.layers:
        layer.trainable = False
    # One output unit per class folder in the training directory.
    folders = glob(self.train_path + '*')
    flat = Flatten()(backbone.output)
    prediction = Dense(len(folders), activation='sigmoid')(flat)
    net = Model(inputs=backbone.input, outputs=prediction)
    net.summary()
    net.compile(loss='binary_crossentropy', optimizer='adam',
                metrics=['accuracy'])
    # Augmented generator for training; plain rescaling for evaluation.
    train_datagen = ImageDataGenerator(rescale=1. / 255, shear_range=0.2,
                                       zoom_range=0.2, horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1. / 255)
    training_set = train_datagen.flow_from_directory(
        self.train_path, target_size=(224, 224), batch_size=32,
        class_mode='categorical')
    # NOTE(review): validation also reads from self.train_path — confirm
    # this is intended rather than a separate validation directory.
    test_set = test_datagen.flow_from_directory(
        self.train_path, target_size=(224, 224), batch_size=32,
        class_mode='categorical')
    history = net.fit_generator(training_set, validation_data=test_set,
                                epochs=10, steps_per_epoch=2,
                                validation_steps=len(test_set))
    net.save(self.model_save_path)
def load_model(model):
    """Build a binary classifier head on top of a pretrained backbone.

    model: one of "ResNet152", "InceptionV3", "InceptionResNetV2",
    "Xception", "VGG".

    Fixes:
    - The selection now uses elif/else; an unknown name raises ValueError
      immediately (previously ``base_model`` stayed unbound and the call
      crashed later with NameError).
    - The local result no longer shadows the ``model`` selector argument.
    """
    K.clear_session()
    if model == "ResNet152":
        base_model = ResNet152(include_top=False, weights='imagenet',
                               input_shape=(299, 299, 3), pooling="max")
    elif model == "InceptionV3":
        base_model = InceptionV3(include_top=False, weights='imagenet',
                                 input_shape=(299, 299, 3), pooling="max")
    elif model == "InceptionResNetV2":
        base_model = InceptionResNetV2(include_top=False, weights='imagenet',
                                       input_shape=(299, 299, 3), pooling="max")
    elif model == "Xception":
        base_model = Xception(include_top=False, weights='imagenet',
                              input_shape=(299, 299, 3), pooling="max")
    elif model == "VGG":
        base_model = VGG19(include_top=False, weights='imagenet',
                           input_shape=(299, 299, 3), pooling="max")
    else:
        raise ValueError("Unknown base model: %s" % model)
    base_model.summary()
    # Classifier head: 512 -> 256 -> 128 dense stack with dropout,
    # single sigmoid output for binary prediction.
    x = base_model.output
    dense1 = Dense(512, activation='relu')(x)
    x = Dropout(0.2)(dense1)
    dense2 = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(dense2)
    dense3 = Dense(128, activation='relu')(x)
    pred_output = Dense(1, activation='sigmoid')(dense3)
    net = Model(inputs=base_model.input, outputs=[pred_output])
    net.summary()
    return net
def Image_model():
    """Build the image branch of the network: ResNet152 with its
    classification layer replaced by a 1024-unit dense layer named
    'FC-1024'.

    Returns:
        The assembled keras Model.
    """
    print("Create Image Model ...")
    model = ResNet152()
    model.input  # no-op attribute access kept from the original
    model.layers.pop()  # drop the classification head (old-Keras pop idiom)
    fc = Dense(1024, name='FC-1024')
    out = fc(model.layers[-1].output)
    return Model(model.input, out)
class VGG16Model:
    """Thin wrapper around a pretrained ImageNet classifier.

    NOTE(review): despite the class name, the backbone actually used is
    ResNet152 (see the commented-out alternatives below).
    """
    # Process Model
    # model = VGG19(
    # Class attribute: the network is constructed once, at class-definition
    # time (heavy — loads/downloads ImageNet weights on import).
    model = ResNet152(include_top=True, weights='imagenet', input_tensor=None,
                      input_shape=None, pooling=None, classes=1000)
    # model = MobileNetV2(weights='imagenet', include_top=False)

    def infern(self, img: np.ndarray):
        """Classify a single image array; returns the top-3 decoded ImageNet
        predictions (class id, name, score)."""
        x = np.expand_dims(img, axis=0)  # add batch dimension
        x = preprocess_input(x)
        predictions = self.model.predict(x)
        return decode_predictions(predictions, top=3)

    def test(self):
        """Smoke test: classify a local sample image and print the result."""
        # image = load_img('../4051378654_238ca94313.jpg', target_size=(224, 224))
        image = load_img('../1480654305.jpg', target_size=(224, 224))
        image = img_to_array(image)
        print(self.infern(image))
def ResNet(input_shape=(const.X_Height, const.X_Width, const.X_Channels),
           classes=const.Y_Classes, Layers=50, source='keras',
           weights='imagenet'):
    """Create a ResNet classification model.

    Arguments:
        input_shape: height, width and channels of each input image.
        classes: number of classes the model will be trained on.
        Layers: network depth; one of [18, 34, 50, 101, 152].
        source: 'keras' (built-in model) or 'manual' (custom model above).
        weights: 'imagenet' (load pretrained weights) or None.
                 'imagenet' is only available for layers in [50, 101, 152].
    """
    # ---- validate parameters ----
    if Layers not in [18, 34, 50, 101, 152]:
        raise ValueError('Invalid layer number: ' + str(Layers) +
                         ' (must be one of [18, 34, 50, 101, 152]).')
    if source not in ['keras', 'manual']:
        raise ValueError('Invalid model source: ' + str(source) +
                         " (must be 'keras' or 'manual'.")
    if weights not in [None, 'imagenet']:
        raise ValueError('Invalid weights definition: ' + str(weights) +
                         " (must be None or 'imagenet'.")
    if Layers in [18, 34]:
        # Small ResNets exist only in the manual implementation, unweighted.
        if source == 'keras':
            raise ValueError("No keras model available for small ResNets. 'source' parameter must be 'manual' when layers are 18 or 34.")
        if weights is not None:
            raise ValueError("No weights available for small ResNets. 'weights' Parameter must be None when layers are 18 or 34.")

    # ---- build the base model ----
    if source == 'keras':
        if Layers == 50:
            from keras.applications.resnet import ResNet50
            baseModel = ResNet50(include_top=False, weights=weights,
                                 input_shape=input_shape)
        elif Layers == 101:
            from keras.applications.resnet import ResNet101
            baseModel = ResNet101(include_top=False, weights=weights,
                                  input_shape=input_shape)
        elif Layers == 152:
            from keras.applications.resnet import ResNet152
            baseModel = ResNet152(include_top=False, weights=weights,
                                  input_shape=input_shape)
    elif source == 'manual':
        if Layers in [18, 34]:
            baseModel = ResNetSmall(input_shape=input_shape, classes=classes,
                                    Layers=Layers)
        else:
            baseModel = ResNetLarge(input_shape=input_shape, classes=classes,
                                    Layers=Layers, weights=weights)

    # ---- attach the classification head ----
    from keras.models import Model
    from keras.layers import Dense, Flatten, AveragePooling2D
    X = baseModel.output
    X = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid',
                         data_format=None)(X)
    X = Flatten()(X)
    Preds = Dense(const.Y_Classes, activation='softmax',
                  name='fc' + str(const.Y_Classes))(X)
    return Model(inputs=baseModel.input, outputs=Preds)
def chosen_model(choice):
    """Build a transfer-learning classifier from a user-selected backbone.

    choice 1-18 selects an architecture from the table below; choice 19
    loads an existing model via model_exist().  The user is asked
    interactively whether to use pretrained ImageNet weights.  The
    assembled model (frozen backbone + GAP/Dropout/softmax head, one
    output per folder under data/train) is handed to training().

    Fix: the original 18-branch if cascade (each branch duplicating the
    weights question result) is replaced by a single dispatch table; the
    printed messages, the global ``base_model`` assignment, and the
    training flow are unchanged.
    """
    global base_model
    if choice == 19:
        model_exist()
        return
    # Ask whether to use pretrained ImageNet weights.
    while 1:
        print()
        print(
            'Transfer Learning? - Will use pre-trained model with imagenet weights'
        )
        print('y')
        print('n')
        weights_wanted = input()
        if weights_wanted.upper() != 'Y' and weights_wanted.upper() != 'N':
            print('ERROR: Please enter a valid choice')
        else:
            break
    # Dispatch table: menu number -> (display name, constructor).
    choices = {
        1: ('Xception', Xception),
        2: ('VGG16', VGG16),
        3: ('VGG19', VGG19),
        4: ('ResNet50', ResNet50),
        5: ('ResNet101', ResNet101),
        6: ('ResNet152', ResNet152),
        7: ('ResNet50V2', ResNet50V2),
        8: ('ResNet101V2', ResNet101V2),
        9: ('ResNet152V2', ResNet152V2),
        10: ('InceptionV3', InceptionV3),
        11: ('InceptionResNetV2', InceptionResNetV2),
        12: ('MobileNet', MobileNet),
        13: ('MobileNetV2', MobileNetV2),
        14: ('DenseNet121', DenseNet121),
        15: ('DenseNet169', DenseNet169),
        16: ('DenseNet201', DenseNet201),
        17: ('NASNetLarge', NASNetLarge),
        18: ('NASNetMobile', NASNetMobile),
    }
    if choice in choices:
        model_name, constructor = choices[choice]
        print('Selected Model = ' + model_name)
        weights = 'imagenet' if weights_wanted.upper() == 'Y' else None
        base_model = constructor(weights=weights, include_top=False)
    CLASSES = len(os.listdir('data/train'))
    print('Number of Classes = {}'.format(CLASSES))
    # Classification head on top of the (frozen) backbone.
    x = base_model.output
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dropout(0.4)(x)
    predictions = Dense(CLASSES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in base_model.layers:
        layer.trainable = False
    model.compile(optimizer='rmsprop', loss='categorical_crossentropy',
                  metrics=['accuracy'])
    training(model)
labels = [ int(str(p).split("/")[1].split("_")[0][-2:]) for p in image_paths ] # Commented models. For testing, please check the README to adjust image dimensions and feature vector size #model = Xception(include_top=False, weights='imagenet', pooling='avg') #model = NASNetLarge(include_top=False, weights='imagenet', pooling='avg') #model = InceptionResNetV2(include_top=False, weights='imagenet', pooling='avg') #model = VGG16(include_top=False, weights='imagenet', pooling='avg') #model = cifar100vgg(train=False) #my_layer = model.model.layers[56] #model = Model(model.model.input, outputs=my_layer.output) # Define the model model = ResNet152(include_top=False, weights='imagenet', pooling='avg') n_clusters = 58 model.layers[0].trainable = False dims = [224, 224] vect_len = 2048 # Define list to store vector values feature_vects = np.zeros((len(image_paths), vect_len), dtype=float) # Extract vectors for idx, img in enumerate(image_paths): # Print print("Extracting vector features for image: " + str(idx)) # Load and reshape the image to input to the network img = cv2.resize(cv2.imread(img), (dims[0], dims[1]))
def extract_features(nn_model, fine_tune=False):
    """Extract and save min-max-normalized CNN features for every image under
    ./dataset/img, writing one .npz file per image into
    ./dataset/feature_<nn_model>/<class folder>/.

    Arguments:
        nn_model: one of resnet50/resnet101/resnet152/densenet121/
                  densenet169/densenet201.
        fine_tune: when True, build the backbone without pretrained weights
                  and load fine-tuned weights from ./finetune/<nn_model>/.

    Raises:
        NotImplementedError: for an unknown model name.

    Fixes:
    - The two fully duplicated model-selection cascades (one per fine_tune
      branch, differing only in ``weights``) are collapsed into a single
      constructor table.
    - The running counter no longer shadows the builtin ``sum``.
    """
    backbones = {
        'resnet50': ResNet50,
        'resnet101': ResNet101,
        'resnet152': ResNet152,
        'densenet121': DenseNet121,
        'densenet169': DenseNet169,
        'densenet201': DenseNet201,
    }
    if nn_model not in backbones:
        raise NotImplementedError("The NN model is not implemented!")
    weights = None if fine_tune else 'imagenet'
    model = backbones[nn_model](include_top=False, weights=weights,
                                pooling='avg')
    if fine_tune:
        model.load_weights('./finetune/' + nn_model +
                           '/finetune_weights_50_epoch.h5', by_name=True)

    ImgPath = './dataset/img'
    ProcessedPath = './dataset/feature_' + nn_model
    Lastlist = os.listdir(ImgPath)
    count = 0  # renamed from `sum` (shadowed the builtin)
    for lastfolder in Lastlist:
        LastPath = os.path.join(ImgPath, lastfolder)
        savepath = os.path.join(ProcessedPath, lastfolder)
        imagelist = os.listdir(LastPath)
        for image1 in imagelist:
            count += 1
            print(image1)
            print('sum is ', count)
            image_pre, ext = os.path.splitext(image1)
            imgfile = LastPath + '/' + image1
            img = image.load_img(imgfile, target_size=(64, 64))
            x = image.img_to_array(img)  # shape: (64, 64, 3)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)  # shape: (1, 64, 64, 3)
            print(x.shape)
            want = model.predict(x)
            print(np.shape(want))  # e.g. (1, 2048) for ResNet backbones
            # Min-max normalize the feature vector to [0, 1].
            a = np.min(want)
            b = np.max(want)
            want = (want - a) / (b - a)
            if not os.path.exists(savepath):
                os.makedirs(savepath)
            np.save(os.path.join(savepath, image_pre + '.npz'), want)
def __init__(self):
    # Headless ResNet152 with ImageNet weights, used as a fixed feature
    # extractor by the rest of this class.
    self.model = ResNet152(weights='imagenet', include_top=False)
from keras import backend as K

# Create the TF session with device-placement logging so GPU usage is
# visible, then probe available GPUs via the (TF1-era) backend helper.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
K.tensorflow_backend._get_available_gpus()
##
# Set up the dataset paths (translated from: "Se establecen los paths").
path_csv = '/media/user_home2/vision2020_01/Data/iWildCam2019/train.csv'
path_train = '/media/user_home2/vision2020_01/Data/iWildCam2019/train_images'
##
# Build the model (translated from: "Se crea el modelo").
# create the base pre-trained model
base_model = ResNet152(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer -- let's say we have 200 classes
# NOTE(review): the comment above says 200 classes but the layer has 14.
predictions = Dense(14, activation='softmax')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
# NOTE(review): loop body continues beyond this excerpt.
for layer in base_model.layers:
def Build_Model_ResNet(px_train, pSessionParameters, pTrainingParameters):
    """Build a new ResNet model.

    Input parameters:
        px_train: training data used to set the input shape of the model.
        pSessionParameters: dict; ModelBuildParameters selects the
            architecture ('Model' substring match), Version (1 or 2),
            IncludeTop, NoClasses and final Activation.
        pTrainingParameters: unused here; kept for interface compatibility.

    Return parameters:
        model (keras Sequential: conv base + extra layers + class head).

    Fix: the four duplicated if-cascades (Version x IncludeTop) are
    collapsed into one ordered lookup; the substring-match order
    (ResNet50, then ResNet101, then ResNet152) is preserved.
    """
    from keras.applications.resnet import ResNet50
    from keras.applications.resnet import ResNet101
    from keras.applications.resnet import ResNet152
    from keras.applications.resnet_v2 import ResNet50V2
    from keras.applications.resnet_v2 import ResNet101V2
    from keras.applications.resnet_v2 import ResNet152V2
    from keras.layers import Input

    BatchNormFlag = pSessionParameters['BatchNorm']
    NoClasses = pSessionParameters['ModelBuildParameters']['NoClasses']
    Activation = pSessionParameters['ModelBuildParameters']['Activation']
    IncludeTopFlag = pSessionParameters['ModelBuildParameters']['IncludeTop']
    ModelName = pSessionParameters['ModelBuildParameters']['Model']
    Version = pSessionParameters['ModelBuildParameters']['Version']

    # Ordered candidates preserve the original elif substring-match order.
    if Version == 1:
        candidates = [('ResNet50', ResNet50), ('ResNet101', ResNet101),
                      ('ResNet152', ResNet152)]
    else:  # Version 2
        candidates = [('ResNet50', ResNet50V2), ('ResNet101', ResNet101V2),
                      ('ResNet152', ResNet152V2)]
    conv_base = None
    for key, constructor in candidates:
        if key in ModelName:
            conv_base = constructor(input_shape=(px_train.shape[1:]),
                                    weights=None,
                                    include_top=bool(IncludeTopFlag),
                                    classes=NoClasses,
                                    pooling='avg')
            break

    model = models.Sequential()
    model.add(conv_base)
    # Add Dense/Dropout/BatchNorm layers based on ModelBuildParameters.
    model = Build_Model_AddLayers(model, pSessionParameters)
    model.add(layers.Dense(NoClasses, activation=Activation,
                           name='dense_class'))
    return model
print('train from start')
model = models.Sequential()
# Choose the ResNet depth from the CLI model string (e.g. "resnet50");
# weights are randomly initialized and the input is CIFAR-sized 32x32x3.
if '50' in args_model:
    base_model = ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '101' in args_model:
    base_model = ResNet101(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '152' in args_model:
    base_model = ResNet152(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
# Wrap the backbone in a Sequential model and flatten its feature map.
model.add(base_model)
model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
def ResNet101_nn(train_path, vali_path, storageFileName):
    """Train an image classifier with early stopping and save the model plus
    a learning-curve plot.

    NOTE(review): despite the function name, the backbone used is
    ResNet152 (and outputs are saved under a resnet152_* prefix) — the
    name is kept for caller compatibility.
    """
    started = time()
    stopper = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,
                            verbose=1, mode='auto',
                            restore_best_weights=True)
    # Augmented pipeline for training, plain rescaling for validation.
    aug_datagen = ImageDataGenerator(rescale=1. / 255, rotation_range=40,
                                     width_shift_range=0.2,
                                     height_shift_range=0.2, shear_range=0.2,
                                     zoom_range=0.2, horizontal_flip=True,
                                     fill_mode='nearest')
    plain_datagen = ImageDataGenerator(rescale=1. / 255, )
    train_gen = aug_datagen.flow_from_directory(
        train_path,
        target_size=(256, 256),
        batch_size=8, )
    vali_gen = plain_datagen.flow_from_directory(
        vali_path,
        target_size=(256, 256),
        batch_size=8, )
    # Pretrained backbone plus a flatten/dropout/softmax head.
    backbone = ResNet152(weights='imagenet', include_top=False,
                         input_shape=INPUT_SHAPE)
    head = Flatten()(backbone.output)
    head = Dropout(0.2)(head)
    head = Dense(NUM_CLASSES, activation='softmax')(head)
    net = Model(inputs=backbone.inputs, outputs=head)
    # Freeze the first FREEZE_LAYERS layers; fine-tune the remainder.
    for layer in net.layers[:FREEZE_LAYERS]:
        layer.trainable = False
    for layer in net.layers[FREEZE_LAYERS:]:
        layer.trainable = True
    net.compile(optimizer=optimizers.RMSprop(lr=1e-5),
                loss='categorical_crossentropy', metrics=['accuracy'])
    history = net.fit_generator(train_gen, steps_per_epoch=50, epochs=30,
                                validation_data=vali_gen,
                                validation_steps=50, callbacks=[stopper])
    net.save(osp.join("result", "resnet152_" + storageFileName + "_3.h5"))
    plt_LineChart(history=history,
                  netName="resnet152_" + storageFileName + ".jpg")
    finished = time()
    mins = (finished - started) // 60
    secs = (finished - started) % 60
    print("ResNet Execute time: {}:{:.2f}".format(mins, secs))