def _extract_backbone(self):
    """Extract feature maps from the backbone."""
    if self.backbone == 'large':
        from model.mobilenet_v3_large import MobileNetV3_Large
        model = MobileNetV3_Large(self.shape, self.n_class).build()
        layer_name8 = 'batch_normalization_13'
        layer_name16 = 'add_5'
    elif self.backbone == 'small':
        from model.mobilenet_v3_small import MobileNetV3_Small
        model = MobileNetV3_Small(self.shape, self.n_class).build()
        layer_name8 = 'batch_normalization_7'
        layer_name16 = 'add_2'
    else:
        raise Exception('Invalid backbone: {}'.format(self.backbone))

    if self.weights is not None:
        model.load_weights(self.weights)

    inputs = model.input
    # 1/8 feature map.
    out_feature8 = model.get_layer(layer_name8).output
    # 1/16 feature map.
    out_feature16 = model.get_layer(layer_name16).output

    return inputs, out_feature8, out_feature16
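
# Hedged usage sketch (not part of the original code): one way to check that
# the two intermediate tensors returned above really sit at 1/8 and 1/16 of
# the input resolution. The helper name is a hypothetical addition.
from keras.models import Model

def _debug_feature_shapes(inputs, out_feature8, out_feature16):
    probe = Model(inputs=inputs, outputs=[out_feature8, out_feature16])
    # output_shape is a list of (batch, H, W, C) tuples; H and W should be
    # 1/8 and 1/16 of the input height and width respectively.
    print(probe.output_shape)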
def build(self, plot=False):
    """Build Lite R-ASPP.

    # Arguments
        plot: Boolean, whether to plot the model.

    # Returns
        model: Model, model.
    """
    from model.mobilenet_v3_small import MobileNetV3_Small

    model = MobileNetV3_Small(self.shape, 1, alpha=1.0,
                              include_top=False).build()
    inputs = model.input
    # x = model.get_layer('activation_25').output
    x = model.get_layer('global_average_pooling2d_10').output
    x = Dropout(0.5)(x)
    # x = Flatten()(x)
    x = Dense(100, activation='elu')(x)
    x = Dropout(0.5)(x)
    x = Dense(50, activation='elu')(x)
    x = Dense(10, activation='elu')(x)
    x = Dense(1)(x)

    model = Model(inputs=inputs, outputs=x)

    if plot:
        plot_model(model, to_file='images/LR_ASPP.png', show_shapes=True)

    return model
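
# Hedged usage sketch: build() above ends in a single linear Dense(1) output,
# so the network predicts one continuous value per image and a regression
# loss such as MSE is the natural fit. The function name, input size, and
# compile settings below are illustrative assumptions, not the repo's code.
import numpy as np

def _demo_regression_head(model, height=224, width=224):
    model.compile(optimizer='adam', loss='mse')
    dummy = np.zeros((1, height, width, 3), dtype='float32')
    print(model.predict(dummy).shape)  # -> (1, 1)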
def pre(img, save_model=False):
    global model
    global model_shape

    if model is None:
        with open('config/config.json', 'r') as f:
            cfg = json.load(f)

        save_dir = cfg['save_dir']
        model_shape = (int(cfg['height']), int(cfg['width']), 3)
        n_class = int(cfg['class_number'])
        batch = int(cfg['batch'])

        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

        if cfg['model'] == 'large':
            from model.mobilenet_v3_large import MobileNetV3_Large
            model = MobileNetV3_Large(model_shape, n_class).build()
        if cfg['model'] == 'small':
            from model.mobilenet_v3_small import MobileNetV3_Small
            model = MobileNetV3_Small(model_shape, n_class).build()
        if cfg['model'] == 'mymodel':
            from model.my_model import MyModel
            model = MyModel(model_shape, n_class).build()
        if cfg['model'] == 'v2':
            from model.mobilenet_v2 import MyModel
            model = MyModel(model_shape, n_class).build()

        pre_weights = "save/v3_weights0.87-0.87.h5"  # cfg['weights']
        if pre_weights and os.path.exists(pre_weights):
            model.load_weights(pre_weights, by_name=True)
            print('Loaded pretrained weights from {}'.format(pre_weights))

        if save_model:
            model.save('save/model_all.h5')
            print("Finish save.")

    # Preprocess: cv2.resize expects (width, height), so swap the stored
    # (height, width) order before resizing.
    img = cv2.resize(img, (model_shape[1], model_shape[0]))
    img = img * 1. / 255
    pre = model.predict(np.array([img]))
    # print("pre: ", pre, np.argmax(pre[0]))
    # pre_cate = np.argmax(pre[0])

    return pre
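
# Minimal usage sketch (illustrative assumption, not part of the original
# script): pre() resizes and rescales internally, so a raw BGR frame read
# with OpenCV is enough. cv2 and numpy are already imported by the script
# above; the sample path is hypothetical.
def _demo_pre(image_path='test.jpg'):
    frame = cv2.imread(image_path)
    scores = pre(frame)                # shape: (1, n_class)
    print('predicted class:', int(np.argmax(scores[0])))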
def train():
    with open('config/config.json', 'r') as f:
        cfg = json.load(f)

    save_dir = cfg['save_dir']
    shape = (int(cfg['height']), int(cfg['width']), 3)
    n_class = int(cfg['class_number'])
    batch = int(cfg['batch'])

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    if cfg['model'] == 'large':
        from model.mobilenet_v3_large import MobileNetV3_Large
        model = MobileNetV3_Large(shape, n_class).build()
    if cfg['model'] == 'small':
        from model.mobilenet_v3_small import MobileNetV3_Small
        model = MobileNetV3_Small(shape, n_class).build()

    pre_weights = cfg['weights']
    if pre_weights and os.path.exists(pre_weights):
        model.load_weights(pre_weights, by_name=True)

    opt = Adam(lr=float(cfg['learning_rate']))
    earlystop = EarlyStopping(monitor='val_acc', patience=5, verbose=0,
                              mode='auto')
    model.compile(loss='categorical_crossentropy', optimizer=opt,
                  metrics=['accuracy'])

    train_generator, validation_generator, count1, count2 = generate(
        batch, shape[:2], cfg['train_dir'], cfg['eval_dir'])

    hist = model.fit_generator(
        train_generator,
        validation_data=validation_generator,
        steps_per_epoch=count1 // batch,
        validation_steps=count2 // batch,
        epochs=cfg['epochs'],
        callbacks=[earlystop])

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv(os.path.join(save_dir, 'hist.csv'), encoding='utf-8', index=False)
    model.save_weights(
        os.path.join(save_dir, '{}_weights.h5'.format(cfg['model'])))
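
# generate() is defined elsewhere in the repo; the sketch below is only a
# hedged assumption about the kind of helper it implies (two directory
# generators plus their sample counts), shown to make the training loop
# above easier to follow. It is not the repo's actual implementation.
from keras.preprocessing.image import ImageDataGenerator

def generate_sketch(batch, size, train_dir, eval_dir):
    train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)
    eval_datagen = ImageDataGenerator(rescale=1. / 255)
    # target_size takes (height, width), which matches shape[:2] above.
    train_gen = train_datagen.flow_from_directory(
        train_dir, target_size=size, batch_size=batch, class_mode='categorical')
    eval_gen = eval_datagen.flow_from_directory(
        eval_dir, target_size=size, batch_size=batch, class_mode='categorical')
    return train_gen, eval_gen, train_gen.samples, eval_gen.samples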
input_size = 32
model_path = "weights/"
shape = (input_size, input_size, 3)
classes = 10
alpha = 0.2
message = "Input size : 32 x 32"
label = ['airplane', 'automobile', 'bird', 'cat', 'deer',
         'dog', 'frog', 'horse', 'ship', 'truck']

# model load
print("V3_32 Model loading...")
model = MobileNetV3_Small(shape, classes, alpha).build()
model.load_weights(model_path + 'v3_32.h5')


def main():
    camera_width = 352
    camera_height = 288
    fps = ""
    flag_score = False
    elapsedTime = 0

    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FPS, 40)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
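    # The capture loop is truncated above; this is a hedged sketch of the
    # per-frame inference step such a loop typically performs, assuming the
    # model expects RGB input scaled to [0, 1] at input_size x input_size
    # and that numpy is imported as np at module level.
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = cv2.resize(img, (input_size, input_size)) / 255.
        scores = model.predict(np.array([img]))[0]
        top = int(np.argmax(scores))
        cv2.putText(frame, '{} {:.2f}'.format(label[top], scores[top]),
                    (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)
        cv2.imshow('MobileNetV3', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()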
def train():
    with open('config/config.json', 'r') as f:
        cfg = json.load(f)

    save_dir = cfg['save_dir']
    shape = (int(cfg['height']), int(cfg['width']))
    n_class = int(cfg['class_number'])
    batch = int(cfg['batch'])

    if not os.path.exists(save_dir):
        os.mkdir(save_dir)

    train_images, train_labels, test_images, test_labels = load_images()

    if cfg['model'] == 'large':
        from model.mobilenet_v3_large import MobileNetV3_Large
        model = MobileNetV3_Large(train_images[0].shape, n_class).build(shape=shape)
    if cfg['model'] == 'small':
        from model.mobilenet_v3_small import MobileNetV3_Small
        model = MobileNetV3_Small(train_images[0].shape, n_class).build(shape=shape)

    optimizer = build_optimizer(learning_rate=float(cfg['learning_rate']),
                                momentum=0.9)
    # earlystop = EarlyStopping(monitor='val_accuracy', patience=5, verbose=0, mode='auto')
    checkpoint = ModelCheckpoint(
        filepath=os.path.join(save_dir, '{}_weights.h5'.format(cfg['model'])),
        monitor='val_acc',
        save_best_only=True,
        save_weights_only=True)

    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()

    # data augmentation
    datagen1 = ImageDataGenerator(
        featurewise_center=False,             # set input mean to 0 over the dataset
        samplewise_center=False,              # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,   # divide each input by its std
        zca_whitening=False,                  # apply ZCA whitening
        rotation_range=15,                    # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,                # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,               # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,                 # randomly flip images horizontally
        vertical_flip=False)                  # do not flip images vertically
    datagen1.fit(train_images)

    hist = model.fit_generator(
        datagen1.flow(train_images, train_labels, batch_size=batch),
        validation_data=(test_images, test_labels),
        steps_per_epoch=train_images.shape[0] // batch,
        epochs=cfg['epochs'],
        callbacks=[checkpoint])

    df = pd.DataFrame.from_dict(hist.history)
    df.to_csv(os.path.join(save_dir, 'hist.csv'), encoding='utf-8', index=False)
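
# build_optimizer() is defined elsewhere in the repo; the sketch below is an
# assumption about its shape (plain SGD with momentum at the given learning
# rate), shown only to make the training script above easier to follow.
from keras.optimizers import SGD

def build_optimizer_sketch(learning_rate, momentum):
    # Swap in a learning-rate schedule or a different optimizer if the real
    # helper does something more elaborate.
    return SGD(lr=learning_rate, momentum=momentum)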