def load_emotion_model(model_path):
    img_width, img_height = 224, 224
    num_channels = 3
    num_classes = 7
    emotion_model = resnet101_model(img_height, img_width, num_channels, num_classes)
    emotion_model.load_weights(model_path, by_name=True)
    return emotion_model
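# Hedged usage sketch for the loader above: it assumes a trained checkpoint at
# "models/model.best.hdf5" (the path used by the FER2013 evaluation script below) and a
# single 224x224 RGB face crop; any training-time preprocessing (mean subtraction,
# scaling) would still need to be applied before predict().
import numpy as np

emotion_model = load_emotion_model("models/model.best.hdf5")
face = np.zeros((1, 224, 224, 3), dtype=np.float32)  # placeholder for a preprocessed face crop
probs = emotion_model.predict(face)[0]
print("predicted class index:", int(np.argmax(probs)))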
def RefineNet101(input_shape, num_class, resnet_weights=None, frontend_trainable=True):
    '''Returns RefineNet model'''
    # for layer in base_model.layers:
    #     layer.trainable = False
    model_base = resnet101_model(input_shape, resnet_weights)

    # Get ResNet block output layers and bring them to the proper depth
    # with 1x1 bottleneck convolutions
    feature_maps = model_base.output
    filter_num = [512, 256, 256, 256]
    for i in range(4):
        feature_maps[i] = Conv2D(filter_num[i], 1, padding='same')(feature_maps[i])

    # RefineNet cascade, from the coarsest (1/32) to the finest (1/4) feature map
    rf4 = RefineNetBlock(upper=feature_maps[0], lower=None)
    rf3 = RefineNetBlock(upper=feature_maps[1], lower=rf4)
    rf2 = RefineNetBlock(upper=feature_maps[2], lower=rf3)
    rf1 = RefineNetBlock(upper=feature_maps[3], lower=rf2)

    y = RCU(tensor=rf1, filters=256)
    y = RCU(tensor=y, filters=256)
    # Bilinear upsampling caused the model not to learn anything and is still under test,
    # so a transposed convolution is used here instead.
    y = Upsample(tensor=y, method='transpose', scale=4)

    # The per-class 3x3 head is unused; the single-channel sigmoid head below is the
    # actual output (same pattern as build_refinenet below).
    # output = Conv2D(filters=num_class, kernel_size=(3, 3))(y)
    output = Conv2D(filters=1, kernel_size=(1, 1))(y)
    output = Activation('sigmoid', name='output_layer')(output)

    model = Model(inputs=model_base.input, outputs=output, name='RefineNet')
    print('*** Building Network Completed ***')
    print('*** Model Output Shape => ', model.output_shape, '***')

    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()

    # if pretrained_weights:
    #     model.load_weights(pretrained_weights)

    plot_model(model, to_file='refinenet_101.png', show_shapes=True)
    return model
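# The helpers RCU() and Upsample() used above are not shown in this excerpt. Below is a
# minimal sketch of what they might look like, following the RefineNet paper's residual
# conv unit (ReLU-Conv-ReLU-Conv with an identity shortcut) and the transposed-convolution
# upsampling referred to above; the repo's actual implementations may differ.
from keras import backend as K
from keras.layers import Activation, Add, Conv2D, Conv2DTranspose, UpSampling2D

def RCU(tensor, filters=256):
    """Residual Conv Unit: two 3x3 convs preceded by ReLU, added back to the input.
    Assumes the input already carries `filters` channels so the shortcut add is valid."""
    x = Activation('relu')(tensor)
    x = Conv2D(filters, 3, padding='same')(x)
    x = Activation('relu')(x)
    x = Conv2D(filters, 3, padding='same')(x)
    return Add()([tensor, x])

def Upsample(tensor, method='transpose', scale=2):
    """Upsample by `scale` with either a learned transposed conv or bilinear resizing."""
    filters = K.int_shape(tensor)[-1]
    if method == 'transpose':
        return Conv2DTranspose(filters, scale * 2, strides=scale, padding='same')(tensor)
    # bilinear interpolation requires Keras >= 2.2.3
    return UpSampling2D(size=scale, interpolation='bilinear')(tensor)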
            num_corrects += 1
    return num_corrects / num_test_samples


if __name__ == '__main__':
    img_width, img_height = 224, 224
    num_channels = 3
    num_classes = 7
    class_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
    # emotion = {0: 'Angry', 1: 'Disgust', 2: 'Fear', 3: 'Happy', 4: 'Sad', 5: 'Surprise', 6: 'Neutral'}
    num_test_samples = 3589

    print("\nLoad the trained ResNet model....")
    model = resnet101_model(img_height, img_width, num_channels, num_classes)
    model.load_weights("models/model.best.hdf5", by_name=True)

    y_pred, y_prob = predict(model, 'fer2013/test')
    print("y_pred: " + str(y_pred))
    y_test = read_data('fer2013/fer2013.csv')
    y_test = decode(y_test)
    print("y_test: " + str(y_test))

    acc = calc_acc(y_pred, y_test)
    print("%s: %.2f%%" % ('acc', acc * 100))

    # Compute confusion matrix
    cnf_matrix = confusion_matrix(y_test, y_pred)
    np.set_printoptions(precision=2)
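    # The script stops after computing cnf_matrix; below is a hedged sketch of how the
    # normalized matrix could be rendered with matplotlib using class_names. The original
    # project may use its own plot_confusion_matrix helper instead.
    import matplotlib.pyplot as plt

    norm_cm = cnf_matrix.astype('float') / cnf_matrix.sum(axis=1)[:, np.newaxis]
    plt.figure(figsize=(7, 6))
    plt.imshow(norm_cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title('Normalized confusion matrix')
    plt.colorbar()
    ticks = np.arange(len(class_names))
    plt.xticks(ticks, class_names, rotation=45)
    plt.yticks(ticks, class_names)
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.savefig('confusion_matrix.png')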
def build_refinenet(input_shape, num_class, resnet_weights=None, frontend_trainable=True):
    """
    Builds the RefineNet model.

    Arguments:
      input_shape: Size of the input image, including number of channels
      num_class: Number of classes
      resnet_weights: Path to pre-trained weights for ResNet-101
      frontend_trainable: Whether or not to freeze the ResNet layers during training

    Returns:
      RefineNet model
    """
    # Build the ResNet-101 frontend
    model_base = resnet101_model(input_shape, resnet_weights)

    # Get ResNet block output layers
    high = model_base.output
    low = [None, None, None]

    # Bring the feature maps to the proper depth with 1x1 bottleneck convolutions
    high[0] = Conv2D(512, 1, padding='same', name='resnet_map1',
                     kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[0])
    high[1] = Conv2D(256, 1, padding='same', name='resnet_map2',
                     kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[1])
    high[2] = Conv2D(256, 1, padding='same', name='resnet_map3',
                     kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[2])
    high[3] = Conv2D(256, 1, padding='same', name='resnet_map4',
                     kernel_initializer=kern_init, kernel_regularizer=kern_reg)(high[3])
    # Re-assign through the list; iterating with "for h in high" would not update it
    for i in range(len(high)):
        high[i] = BatchNormalization()(high[i])

    # RefineNet cascade
    low[0] = RefineBlock(high_inputs=high[0], low_inputs=None, block=4)    # Only input: ResNet 1/32
    low[1] = RefineBlock(high_inputs=high[1], low_inputs=low[0], block=3)  # High input = ResNet 1/16, low input = previous 1/16
    low[2] = RefineBlock(high_inputs=high[2], low_inputs=low[1], block=2)  # High input = ResNet 1/8, low input = previous 1/8
    net = RefineBlock(high_inputs=high[3], low_inputs=low[2], block=1)     # High input = ResNet 1/4, low input = previous 1/4

    net = ResidualConvUnit(net, name='rf_rcu_o1_')
    net = ResidualConvUnit(net, name='rf_rcu_o2_')
    net = UpSampling2D(size=4, name='rf_up_o')(net)
    # net = Conv2D(num_class, 3, activation='relu', padding='same', name='rf_pred1')(net)
    net = Conv2D(1, 1, activation='sigmoid', padding='same', name='rf_pred')(net)

    model = Model(model_base.input, net)

    # for layer in model.layers:
    #     if 'rb' in layer.name or 'rf_' in layer.name:
    #         layer.trainable = True
    #     else:
    #         layer.trainable = frontend_trainable

    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
    model.summary()

    # if pretrained_weights:
    #     model.load_weights(pretrained_weights)

    plot_model(model, to_file='refinenet.png', show_shapes=True)
    return model
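# RefineBlock() itself is not defined in this file. In the RefineNet paper a refine block
# chains residual conv units, multi-resolution fusion, and chained residual pooling; the
# sketch below covers only the chained-residual-pooling stage and is an assumption about
# how this repo's block might be structured, not its actual implementation.
from keras.layers import Activation, Add, Conv2D, MaxPooling2D

def ChainedResidualPooling(tensor, filters=256, name='crp_'):
    """Two pool-then-conv branches, each summed back onto the running output.
    Assumes the input already carries `filters` channels so the sums are valid."""
    net = Activation('relu', name=name + 'relu')(tensor)
    out = net
    for i in range(2):
        net = MaxPooling2D(pool_size=5, strides=1, padding='same',
                           name=name + 'pool{}'.format(i + 1))(net)
        net = Conv2D(filters, 3, padding='same',
                     name=name + 'conv{}'.format(i + 1))(net)
        out = Add(name=name + 'sum{}'.format(i + 1))([out, net])
    return out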
def __init__(self, height=180, width=180, batch_size=64, max_epochs=6,
             base_model='inceptionResnetV2', num_classes=5270):
    self.height = height
    self.width = width
    self.batch_size = batch_size
    self.max_epochs = max_epochs
    self.base_model = base_model
    self.num_classes = num_classes

    self.make_category_tables()
    # Check that the category lookup tables work:
    print(self.cat2idx[1000012755], self.idx2cat[4])

    if not os.path.exists("train_offsets.csv"):
        self.read_bson_files()
    else:
        # pd.DataFrame.from_csv was removed from pandas; read_csv with index_col=0 is equivalent
        self.train_offsets_df = pd.read_csv("train_offsets.csv", index_col=0)
    if not os.path.exists("train_images.csv") or not os.path.exists("val_images.csv"):
        self.train_val_split()
    self.data_generator()

    # models = Models(input_shape=(self.height, self.width, 3), classes=self.num_classes)
    # if self.base_model == 'vgg16':
    #     models.vgg16()
    # elif self.base_model == 'vgg19':
    #     models.vgg19()
    # elif self.base_model == 'resnet50':
    #     models.resnet50()
    # elif self.base_model == 'inceptionV3':
    #     models.inceptionV3()
    # else:
    #     print('Unknown base model')
    #     raise SystemExit
    # models.compile(optimizer=RMSprop(lr=1e-4))
    # self.model = models.get_model()

    if self.base_model == 'resnet101':
        self.model = resnet101_model(self.height, self.width, color_type=3,
                                     num_classes=self.num_classes)
    elif self.base_model == 'resnet152':
        self.model = resnet152_model(self.height, self.width, color_type=3,
                                     num_classes=self.num_classes)
    else:
        models = Models(input_shape=(self.height, self.width, 3), classes=self.num_classes)
        if self.base_model == 'inceptionV4':
            models.inceptionV3()  # NOTE: 'inceptionV4' currently falls back to the InceptionV3 builder
        elif self.base_model == 'inceptionResnetV2':
            models.inceptionResnetV2()
        sgd = SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)
        models.compile(optimizer=sgd)
        self.model = models.get_model()
    self.model.summary()
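# make_category_tables() is called above but not shown in this excerpt. Below is a hedged
# sketch of what it likely does, following the common Cdiscount starter-kernel pattern:
# build the category_id <-> class-index lookups (cat2idx / idx2cat) from the competition's
# category list; the "category_names.csv" file name and its column name are assumptions.
import pandas as pd

def make_category_tables(self):
    categories_df = pd.read_csv("category_names.csv", index_col="category_id")
    self.cat2idx = {}
    self.idx2cat = {}
    for idx, cat_id in enumerate(categories_df.index):
        self.cat2idx[cat_id] = idx
        self.idx2cat[idx] = cat_id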
def test_ensemble(self):
    '''Test-time ensemble of several different models.'''
    model1 = resnet101_model(180, 180, color_type=3, num_classes=self.num_classes, mode=0)
    model2 = resnet101_model(160, 160, color_type=3, num_classes=self.num_classes, mode=1)
    # model3 = resnet152_model(160, 160, color_type=3, num_classes=self.num_classes)
    model_builder = Models(input_shape=(180, 180, 3), classes=self.num_classes)
    model_builder.inceptionResnetV2()
    model4 = model_builder.get_model()

    model1.load_weights('../weights/best_weights_resnet101.hdf5')
    model2.load_weights('../weights/best_weights_resnet101_160crop.hdf5')
    # model3.load_weights('../weights/best_weights_resnet152_160crop.hdf5')
    model4.load_weights('../weights/best_weights_inceptionResnetV2.hdf5')

    submission_df = pd.read_csv(data_dir + "sample_submission.csv")
    submission_df.head()

    test_datagen_resnet = ImageDataGenerator(preprocessing_function=preprocess_input)
    test_datagen_inception = ImageDataGenerator(preprocessing_function=inception_preprocess_input)

    # models = [{"model": model1, "crop": False, "datagen": test_datagen_resnet},
    #           {"model": model2, "crop": True, "datagen": test_datagen_resnet},
    #           {"model": model4, "crop": False, "datagen": test_datagen_inception}]
    models = [{"model": model1, "crop": False, "datagen": test_datagen_resnet, "weight": 0.6},
              {"model": model2, "crop": True, "datagen": test_datagen_resnet, "weight": 0.7}]

    data = bson.decode_file_iter(open(test_bson_path, "rb"))

    with tqdm(total=num_test_products) as pbar:
        for c, d in enumerate(data):
            num_imgs = len(d["imgs"])
            avg_pred = 0
            for model in models:
                batch_x = []
                for i in range(num_imgs):
                    bson_img = d["imgs"][i]["picture"]
                    # Load and preprocess the image.
                    img = load_img(io.BytesIO(bson_img))  # , target_size=(self.height, self.width)
                    x = img_to_array(img)
                    batch_x.append(self.preprocess(x, model))
                batch_x = np.array(batch_x, dtype=K.floatx())
                prediction = model["model"].predict(batch_x, batch_size=num_imgs)
                avg_pred += model["weight"] * self.blending(prediction, 'mean')
            cat_idx = np.argmax(avg_pred)
            # .at avoids the unreliable chained assignment submission_df.iloc[c][...] = ...
            submission_df.at[c, "category_id"] = self.idx2cat[cat_idx]
            pbar.update()

    submission_df.to_csv("../submit/my_submission_ensemble{}.csv.gz".format(len(models)),
                         compression="gzip", index=False)
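# self.blending() is used above but not defined in this excerpt. Given how it is called
# (collapse the (num_imgs, num_classes) predictions for one product into a single class
# distribution), a minimal sketch might look like this; only 'mean' is exercised above,
# and the other methods are assumptions.
import numpy as np

def blending(self, predictions, method='mean'):
    """Combine the per-image predictions of one product into a single distribution."""
    if method == 'mean':
        return predictions.mean(axis=0)
    if method == 'max':
        return predictions.max(axis=0)
    if method == 'geometric':
        return np.exp(np.log(predictions + 1e-8).mean(axis=0))
    raise ValueError('Unknown blending method: {}'.format(method))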