def get_model(input_shape, class_num):
    args = parse_args()
    if args.model == 'resnet50':
        model = ResNet50(include_top=True, weights=None, input_tensor=None,
                         input_shape=input_shape, pooling=None, mode=args.mode,
                         classes=class_num)
    elif args.model == 'resnet50_imagenet':
        model = ResNet50(include_top=True, weights='imagenet', input_tensor=None,
                         input_shape=input_shape, pooling=None, mode=args.mode,
                         classes=class_num)
    elif args.model == 'pretrained':
        modelName = os.path.join(os.getcwd(), 'weights', args.pretrained)
        model = load_model(modelName)
    rmsprop = RMSprop(lr=1e-05, rho=0.99, epsilon=1e-08, decay=0.001)
    model.compile(optimizer=rmsprop, loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    print(model.summary())
    return model
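# Usage sketch (illustrative only; it assumes parse_args() reads --model/--mode/--pretrained
# from the CLI and that y_train is one-hot encoded -- none of these names are guaranteed here):
#
#   model = get_model(input_shape=(224, 224, 3), class_num=10)
#   model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)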
def calc_features(input_path, output_path, n_iterations=100, overwrite=True):
    model = ResNet50(weights='imagenet')
    layer_name = 'avg_pool'
    # Expose the global-average-pooled features instead of the softmax output.
    intermediate_layer_model = Model(inputs=model.input,
                                     outputs=model.get_layer(layer_name).output)
    i = 0
    for folder in glob(input_path + '*'):
        print(i)
        if i >= n_iterations:
            break
        output_basename = os.path.basename(os.path.normpath(folder))
        output_name = output_path + output_basename + "_features"
        if not overwrite and os.path.exists(output_name + ".npy"):
            i += 1
            continue
        batch = get_data_id(folder)
        img = batch  # preprocessed images, shape (n, 3, 224, 224)
        intermediate_output = intermediate_layer_model.predict(img, batch_size=20)
        np.save(output_name, intermediate_output)
        i += 1
def finetuned_resnet(include_top, weights_dir):
    """
    :param include_top: True for training, False for generating intermediate results for LSTM cell
    :param weights_dir: path to load finetune_resnet.h5
    :return:
    """
    base_model = ResNet50(include_top=False, weights='imagenet', input_shape=IMSIZE)
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    x = Dense(2048, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)

    if include_top:
        x = Dense(N_CLASSES, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=x)
    if os.path.exists(weights_dir):
        model.load_weights(weights_dir, by_name=True)

    return model
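# Usage sketch (illustrative): the same builder serves both phases -- a full classifier for
# fine-tuning, and a headless variant whose 1024-d output feeds the LSTM. The weights path
# below is an assumption, not a path taken from the repo.
#
#   clf = finetuned_resnet(include_top=True, weights_dir='weights/finetune_resnet.h5')
#   extractor = finetuned_resnet(include_top=False, weights_dir='weights/finetune_resnet.h5')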
def model_fn(FLAGS, objective, optimizer, metrics):
    """
    Two pre-trained backbones (ResNet50 and InceptionV3) sharing one input tensor.
    """
    input_layer = keras.layers.Input(shape=(FLAGS.input_size, FLAGS.input_size, 3))
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes,
                          input_tensor=input_layer)
    base_model2 = InceptionV3(weights="imagenet",
                              include_top=False,
                              pooling=None,
                              input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                              classes=FLAGS.num_classes,
                              input_tensor=input_layer)
    x = base_model.output
    x2 = base_model2.output
    x = keras.layers.GlobalAveragePooling2D()(x)
    x2 = keras.layers.GlobalAveragePooling2D()(x2)
    # Fuse the two backbones' pooled features before the classifier head.
    t = keras.layers.Concatenate(axis=1)([x, x2])
    t = keras.layers.Dense(500, use_bias=True, activation='relu')(t)
    t = keras.layers.Dropout(0.3)(t)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(t)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    inputs_dim = Input(shape=(FLAGS.input_size, FLAGS.input_size, 3))
    # pooling=None keeps the 4-D feature map that the squeeze-and-excitation block below needs.
    x = ResNet50(weights="imagenet",
                 include_top=False,
                 pooling=None,
                 input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                 classes=FLAGS.num_classes)(inputs_dim)

    # Squeeze-and-excitation: learn per-channel weights and rescale the feature map.
    squeeze = GlobalAveragePooling2D()(x)
    excitation = Dense(units=2048 // 16)(squeeze)
    excitation = Activation('relu')(excitation)
    excitation = Dense(units=2048)(excitation)
    excitation = Activation('sigmoid')(excitation)
    excitation = Reshape((1, 1, 2048))(excitation)
    scale = multiply([x, excitation])

    x = GlobalAveragePooling2D()(scale)
    # x = Dropout(0.3)(x)
    fc2 = Dense(FLAGS.num_classes)(x)
    fc2 = Activation('sigmoid')(fc2)  # note: sigmoid activation here, not softmax
    model = Model(inputs=inputs_dim, outputs=fc2)
    # model.load_weights('/home/work/user-job-dir/src/SE-Xception.h5', by_name=True)
    # model = load_model('/home/work/user-job-dir/src/SE-Xception.h5')
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
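# The middle of model_fn above is the squeeze-and-excitation (SE) pattern. A reusable sketch of
# the same idea with channel count and reduction ratio as parameters (an illustrative refactor,
# not code taken from the original source):
def se_block(feature_map, channels=2048, ratio=16):
    """Rescale a 4-D feature map by learned per-channel attention weights."""
    squeeze = GlobalAveragePooling2D()(feature_map)                # (batch, channels)
    excitation = Dense(channels // ratio, activation='relu')(squeeze)
    excitation = Dense(channels, activation='sigmoid')(excitation)
    excitation = Reshape((1, 1, channels))(excitation)             # broadcast over H and W
    return multiply([feature_map, excitation])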
def get_by_name(model_name, *args, **kwargs):
    model = None
    if model_name == 'resnet50':
        from models.resnet50 import ResNet50
        model = ResNet50(*args, **kwargs)
    else:
        raise ValueError("Model %s not recognized." % model_name)
    print("Model %s was created" % model.name)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    # Replicate the complete model (backbone plus new head) across 4 GPUs; wrapping only the
    # backbone would leave the classifier head outside the data-parallel replicas.
    model = multi_gpu_model(model, 4)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    """
    pre-trained resnet50 model
    """
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling='avg',
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    # for layer in base_model.layers:
    #     layer.trainable = False
    x = base_model.output
    x = Dropout(0.5)(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    """
    pre-trained resnet50 model
    """
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = Flatten()(x)  # flatten the convolutional feature map for the fully connected layer
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)  # fully connected classifier layer
    model = Model(inputs=base_model.input, outputs=predictions)  # define the model's inputs and outputs
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)  # compile the assembled model
    return model
def load_model(model_name):
    print("Loading {} model...".format(model_name))
    if model_name == "resnet50":
        model = ResNet50(weights='imagenet')
    elif model_name == "inceptionv3":
        model = InceptionV3(weights='imagenet')
    elif model_name == "vgg19":
        model = VGG19(weights='imagenet')
    elif model_name == "vgg16":
        model = VGG16(weights='imagenet')
    elif model_name == "alexnet":
        sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
        model = convnet('alexnet', weights_path="../alexnet/alexnet_weights.h5", heatmap=False)
        model.compile(optimizer=sgd, loss='mse')
    else:
        raise ValueError("Wrong model name")
    print("Loaded!")
    return model, model_name
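# Usage sketch (illustrative; the image path and top-3 decode are assumptions). Each backbone
# expects its own input preprocessing, so the returned model should be paired with the matching
# preprocess_input -- shown here for the ResNet50 case:
#
#   from keras.applications.resnet50 import preprocess_input, decode_predictions
#   from keras.preprocessing import image
#   model, name = load_model("resnet50")
#   img = image.img_to_array(image.load_img("example.jpg", target_size=(224, 224)))
#   preds = model.predict(preprocess_input(np.expand_dims(img, axis=0)))
#   print(decode_predictions(preds, top=3))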
def _load_model(model_name, include_top=True, weights='imagenet'):
    if model_name == 'resnet50':
        from models.resnet50 import ResNet50
        model = ResNet50(weights=weights, include_top=include_top)
    elif model_name == 'vgg16':
        from models.vgg16 import VGG16
        model = VGG16(weights=weights, include_top=include_top)
        if weights == 'hybrid1365':
            model.load_weights(join(weights_dir, 'vgg16_hybrid1365_weights.h5'))
        elif weights == 'places365':
            model.load_weights(join(weights_dir, 'vgg16_places365_weights.h5'))
    elif model_name == 'vgg19':
        from models.vgg19 import VGG19
        model = VGG19(weights=weights, include_top=include_top)
    elif model_name == 'inception':
        from models.inception_v3 import InceptionV3
        model = InceptionV3(weights=weights, include_top=include_top)
    elif model_name == 'vgg16_hybrid1365':
        from models.vgg16 import VGG16
        model = VGG16(weights=weights, include_top=include_top)
        model.load_weights(join(weights_dir, 'vgg16_hybrid1365_weights.h5'), by_name=True)
    elif model_name == 'vgg16_places365':
        from models.vgg16 import VGG16
        model = VGG16(weights=weights, include_top=include_top)
        model.load_weights(join(weights_dir, 'vgg16_places365_weights.h5'), by_name=True)
    elif model_name == 'resnet152_hybrid1365':
        from models.resnet import ResnetBuilder
        model = ResnetBuilder.build_resnet_152((3, 224, 224), 100)
        print(join(weights_dir, 'resnet152_hybrid1365.h5'))
        model.load_weights(join(weights_dir, 'resnet152_hybrid1365.h5'), by_name=True)
    else:
        print('Not a valid model. Valid models are vgg16, vgg19, resnet50, inception, '
              'vgg16_hybrid1365, and vgg16_places365')
        sys.exit(1)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    """
    pre-trained resnet50 model
    """
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    for layer in base_model.layers:
        layer.trainable = False
    x = base_model.output
    x = Flatten()(x)
    x = Dense(256, activation='sigmoid', kernel_regularizer=regularizers.l1(0.0001))(x)
    x = Dropout(rate=0.3)(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def model_fn(FLAGS):
    # K.set_learning_phase(0)
    # setup model
    base_model = ResNet50(weights="imagenet",
                          include_top=False,
                          pooling=None,
                          input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                          classes=FLAGS.num_classes)
    for layer in base_model.layers:
        layer.trainable = False
    # if FLAGS.mode == 'train':
    #     K.set_learning_phase(1)
    model = add_new_last_layer(base_model, FLAGS.num_classes)
    # print(model.summary())
    # print(model.layers[84].name)
    # exit()
    # Adam = adam(lr=FLAGS.learning_rate, clipnorm=0.001)
    model.compile(optimizer="adam", loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def model_fn(FLAGS, objective, optimizer, metrics, input_shape=None):
    """
    Pre-trained backbone experiment: the compiled model uses the EfficientNetB5 branch;
    the InceptionV3/Xception/ResNet50 branches are built but left unused.
    """
    # base_model = keras.applications.resnet50.ResNet50(include_top=False, weights='imagenet',
    #                                                   input_tensor=None,
    #                                                   input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                                                   pooling=None, classes=FLAGS.num_classes)
    # base_model = ResNext50(weights=None,
    #                        include_top=False,
    #                        pooling=None,
    #                        input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                        classes=FLAGS.num_classes)
    # base_model = ResNet50(weights='imagenet',
    #                       include_top=False,
    #                       pooling=None,
    #                       input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
    #                       classes=FLAGS.num_classes)
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=True,
                                      weights='imagenet')
    # inp = Input(shape=(FLAGS.batch_size, FLAGS.input_size, FLAGS.input_size, 3))
    inp = Input(input_shape)
    base_model = EfficientNetB5(
        # input_tensor=inp,
        weights='/home/work/user-job-dir/src/weights/efficientnet-b5_notop.h5',
        include_top=False,
        pooling=None,
        input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
        classes=FLAGS.num_classes)
    # base_model.load_weights('/home/work/user-job-dir/src/weights/efficientnet-b5_notop.h5')
    base_model1 = InceptionV3(input_tensor=inp,
                              weights='imagenet',
                              include_top=False,
                              pooling=None,
                              input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                              classes=FLAGS.num_classes)
    base_model2 = Xception(input_tensor=inp,
                           weights='imagenet',
                           include_top=False,
                           pooling=None,
                           input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                           classes=FLAGS.num_classes)
    base_model3 = ResNet50(input_tensor=inp,
                           weights='imagenet',
                           include_top=False,
                           pooling=None,
                           input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                           classes=FLAGS.num_classes)
    for layer in base_model1.layers:
        layer.trainable = True
    for layer in base_model2.layers:
        layer.trainable = True
    for layer in base_model3.layers:
        layer.trainable = True

    # x1 = base_model1.output
    # x1 = Flatten()(x1)
    inception = base_model1.output
    xception = base_model2.output
    resnet = base_model3.output
    top1_model = GlobalMaxPooling2D(data_format='channels_last')(inception)
    top2_model = GlobalMaxPooling2D(data_format='channels_last')(xception)
    top3_model = GlobalMaxPooling2D(data_format='channels_last')(resnet)
    top1_model = Flatten()(top1_model)
    top2_model = Flatten()(top2_model)
    top3_model = Flatten()(top3_model)
    # NOTE: the concatenated ensemble tensor `t` is built but never consumed; the model returned
    # below uses only the EfficientNetB5 branch.
    t = concatenate([top1_model, top2_model, top3_model], axis=1)
    # t = Flatten()(t)
    # top_model = Dense(512, activation='relu')(t)
    # top_model = Dropout(rate=0.5)(top_model)

    for layer in base_model.layers:
        layer.trainable = True
    out = base_model.output
    out = Flatten()(out)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(out)
    model = Model(inputs=base_model.input, outputs=predictions)
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
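# If the three-backbone ensemble were the intended output, the classifier head could be attached
# to the concatenated tensor `t` instead (illustrative sketch, not part of the original function;
# it assumes the three backbones share the `inp` tensor as above):
#
#   t = Dense(512, activation='relu')(t)
#   t = Dropout(rate=0.5)(t)
#   ensemble_preds = Dense(FLAGS.num_classes, activation='softmax')(t)
#   ensemble_model = Model(inputs=inp, outputs=ensemble_preds)
#   ensemble_model.compile(loss=objective, optimizer=optimizer, metrics=metrics)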
def __init__(self):
    """Creates the VGGFace2 neural network, MTCNN and dlib face detectors."""
    self.vggface = ResNet50(n_classes=8631)
    self.vggface.load_weights(WEIGHTS)
    self.mtcnn = MTCNN()
    self.dlib = Dlib()
def init_resnet(self):
    from models.resnet50 import ResNet50
    self.base_model = ResNet50(weights='imagenet', include_top=False)
    self.input_shape = 224
    self.feature_dim = 2048
    self.output_shape = self.base_model.output_shape
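# Feature-extraction sketch (illustrative; assumes `batch` is an (n, 224, 224, 3) float array and
# Keras' resnet50 preprocess_input is in scope). With include_top=False and no pooling argument,
# the backbone returns a 7x7x2048 map, so a spatial mean yields one 2048-d descriptor per image:
#
#   feats = self.base_model.predict(preprocess_input(batch))   # (n, 7, 7, 2048)
#   feats = feats.mean(axis=(1, 2))                            # (n, 2048)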