def get_model(train=True):
    if Path('model.h5').is_file():
        return load_model('model.h5')

    datagen = ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.1,
        zoom_range=0.2,
        horizontal_flip=False,
        preprocessing_function=gen_preprocess,
        fill_mode='nearest')
    data_generator = datagen.flow_from_directory(
        directory='train_data/',
        target_size=IMG_SIZE,
        class_mode='categorical')
    print(data_generator.classes)

    validgen = ImageDataGenerator(preprocessing_function=gen_preprocess)
    valid_generator = validgen.flow_from_directory(
        directory='valid_data/',
        target_size=IMG_SIZE,
        class_mode='categorical',
        shuffle=False)
    test_generator = validgen.flow_from_directory(
        directory='test_data/',
        target_size=IMG_SIZE,
        class_mode='categorical',
        shuffle=False)

    model = SqueezeNet()
    print(model.summary())

    # Replace the ImageNet head with a 4-class head:
    # 1x1 convolution -> ReLU -> global average pooling -> softmax.
    x = Convolution2D(4, (1, 1), padding='same', name='conv11')(model.layers[-5].output)
    x = Activation('relu', name='relu_conv11')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax')(x)
    # x = Dense(4, activation='softmax')(x)
    # x = Dense(4, activation='softmax')(model.layers[-2].output)
    model = Model(model.inputs, x)
    print(model.summary())

    # Following is the original model I was training
    # model = Sequential()
    #
    # model.add(Convolution2D(16, 3, 3,
    #                         border_mode='same',
    #                         input_shape=IMG_SHAPE))
    # model.add(MaxPooling2D(pool_size=(3, 3)))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Convolution2D(32, 3, 3,
    #                         border_mode='same'))
    # model.add(MaxPooling2D(pool_size=(3, 3)))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Convolution2D(48, 3, 3,
    #                         border_mode='same'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Convolution2D(64, 3, 3,
    #                         border_mode='same'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Convolution2D(64, 3, 3,
    #                         border_mode='same'))
    # model.add(MaxPooling2D(pool_size=(2, 2)))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # # 1st Layer - Add a flatten layer
    # model.add(Flatten())
    #
    # model.add(Dense(1164))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Dense(128))
    # model.add(Activation('tanh'))
    # model.add(Dropout(0.2))
    #
    # # 2nd Layer - Add a fully connected layer
    # model.add(Dense(50))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # model.add(Dense(10))
    # model.add(Activation('relu'))
    # model.add(Dropout(0.2))
    #
    # # 4th Layer - Add a fully connected layer
    # model.add(Dense(4))
    # # 5th Layer - Add a softmax activation layer
    # model.add(Activation('softmax'))

    # TODO: Build a Multi-layer feedforward neural network with Keras here.
    # TODO: Compile and train the model
    filepath = "weights-improvement-{epoch:02d}-{loss:.2f}.hdf5"
    callbacks = [
        EarlyStopping(monitor='loss', min_delta=0.01, patience=2, verbose=1),
        LambdaCallback(on_epoch_end=lambda batch, logs: evaluate_model(model, test_generator)),
        ModelCheckpoint(filepath=filepath, monitor='loss', save_best_only=True, verbose=1),
    ]

    model.compile(keras.optimizers.Adam(lr=0.0001),
                  'categorical_crossentropy',
                  ['accuracy'])
    model.fit_generator(data_generator,
                        steps_per_epoch=400,
                        epochs=30,
                        verbose=1,
                        callbacks=callbacks)

    evaluate_model(model, test_generator)
    model.save('model.h5', True)
    return model
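
# evaluate_model() is called by the callbacks above but is not shown in this snippet.
# A minimal sketch, assuming it simply reports loss/accuracy on the held-out generator
# (the body below is illustrative, not the original implementation):
def evaluate_model(model, generator):
    # One pass over the generator's samples (Keras 2.x generator API, matching fit_generator above).
    steps = max(generator.samples // generator.batch_size, 1)
    scores = model.evaluate_generator(generator, steps=steps)
    for name, value in zip(model.metrics_names, scores):
        print('%s: %.4f' % (name, value))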
model = SqueezeNet(include_top=False)  # Construct the feature-extraction layers of SqueezeNet.
model = add_squeezenet_top(model, args.classes, False)  # Add the classification layers on top.

data_set = Dataloader(args.data_base, args.label_path)  # Construct the Dataloader.
data, label = data_set.read_data(args.pos_neg_num, args.target_img_size)  # Read the data.
X_train, X_test, y_train, y_test = train_test_split(
    data, label, test_size=args.split_ratio, random_state=42)  # Split into training and validation sets.
y_train = keras.utils.to_categorical(y_train, args.classes)
y_test = keras.utils.to_categorical(y_test, args.classes)  # Convert the labels to one-hot encoding.

batch_size = args.batch_epoch_size[0]  # Batch size for training, normally 16 or 32.
nb_epoch = args.batch_epoch_size[1]  # Number of training epochs.

model.fit(X_train, y_train,
          batch_size=batch_size,
          epochs=nb_epoch,
          verbose=1,
          validation_data=(X_test, y_test))  # Start training.
model.save(args.model_path)  # Save the model to the specified path.
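
# add_squeezenet_top() is not defined in this snippet. A minimal sketch, assuming it
# mirrors the standard SqueezeNet classifier head used in get_model() above
# (1x1 conv -> ReLU -> global average pooling -> softmax); the signature and the
# meaning of the third argument (whether the base stays trainable) are assumptions.
from keras.layers import Activation, Convolution2D, GlobalAveragePooling2D
from keras.models import Model

def add_squeezenet_top(base_model, num_classes, trainable_base):
    if not trainable_base:
        for layer in base_model.layers:
            layer.trainable = False  # freeze the feature extractor
    x = Convolution2D(num_classes, (1, 1), padding='same', name='conv_top')(base_model.output)
    x = Activation('relu', name='relu_conv_top')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax', name='softmax_top')(x)
    return Model(base_model.inputs, x)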
model = SqueezeNet()

# Saving the model
# Save as JSON: stores only the model's architecture, without its weights or training configuration.
model_json = model.to_json()
# with open("model.json", "w") as json_file:
#     json_file.write(model_json)

# Save only the weights.
model.save_weights("model.h5")

# Save the architecture and weights together.
model.save('my_model.h5')  # creates an HDF5 file 'my_model.h5'
del model  # deletes the existing model
model = load_model('my_model.h5')  # returns a compiled model identical to the previous one

# img = image.load_img(img_path, target_size=(224, 224))
img = image.load_img('images/cat.jpeg', target_size=(227, 227))  # load the image and resize it to the network's input size
x = image.img_to_array(img)    # convert to a numpy array
x = np.expand_dims(x, axis=0)  # add a batch dimension
x = preprocess_input(x)        # apply ImageNet preprocessing
preds = model.predict(x)       # prediction: a 1000-dimensional probability vector
print('Predicted:', decode_predictions(preds))  # decode_predictions returns the top-5 (class ID, class description, probability) tuples
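
# decode_predictions() only applies to the stock 1000-class ImageNet model. For a
# fine-tuned model such as the 4-class one built in get_model(), a minimal sketch of
# mapping a prediction back to the folder names learned by flow_from_directory
# (the generator and model names here are illustrative):
class_lookup = {v: k for k, v in data_generator.class_indices.items()}
preds = model.predict(x)
top = int(np.argmax(preds[0]))
print('Predicted:', class_lookup[top], 'with probability', float(preds[0][top]))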