conv_base = Net(weights='imagenet', include_top=False, input_shape=input_shape)
# model = add_new_last_layer(conv_base, nb_classes)
model = models.Sequential()
# model.add(layers.Flatten(name="flatten"))
model.add(conv_base)
model.add(layers.GlobalMaxPooling2D(name="gap"))
if dropout_rate > 0:
    model.add(layers.Dropout(dropout_rate, name="dropout_out"))
# model.add(layers.Dense(256, activation='relu', name="fc1"))
model.add(layers.Dense(2, activation='softmax', name="fc_out"))  # the 2 here is the number of target classes

# Print the network's parameters to inspect the instantiated convolutional-base model
# model.summary()

# Freeze the convolutional base so its weights are not updated during training
conv_base.trainable = False

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

# # A more robust way to save the model: save it after every epoch.
# output_model_file = './output_model_file/checkpoint-{epoch:02d}e-val_acc_{val_acc:.2f}.hdf5'
# # keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=False, save_weights_only=False, mode='auto', period=1)
# checkpoint = ModelCheckpoint(output_model_file, monitor='val_acc', verbose=1, save_best_only=True)
# model.save('./output_model_file/my_model.h5')
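
# --- Sketch (assumption, not from the original source): `train_generator`, used
# --- below, is typically built with Keras's ImageDataGenerator; the directory
# --- path here is a hypothetical placeholder.
from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    './data/train',               # hypothetical directory of per-class subfolders
    target_size=input_shape[:2],  # spatial size expected by the conv base
    batch_size=batch_size,
    class_mode='categorical')     # one-hot labels for categorical_crossentropy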

history_tl = model.fit_generator(  # start transfer-learning training
    train_generator,
    steps_per_epoch=NUM_TRAIN // batch_size,
    # samples_per_epoch=nb_train_samples,
    epochs=epochs,                            # the original call was truncated here;
    validation_data=validation_generator,     # these closing arguments are assumed,
    validation_steps=NUM_TEST // batch_size)  # following the usual Keras pattern
conv_base = Net(weights='imagenet', include_top=False, input_shape=input_shape)
# model = add_new_last_layer(conv_base, nb_classes)
model = models.Sequential()
# model.add(layers.Flatten(name="flatten"))
model.add(conv_base)
model.add(layers.GlobalMaxPooling2D(name="gap"))
if dropout_rate > 0:
    model.add(layers.Dropout(dropout_rate, name="dropout_out"))
# model.add(layers.Dense(256, activation='relu', name="fc1"))
model.add(layers.Dense(2, activation='softmax', name="fc_out"))  # the 2 here is the number of target classes

# model.summary()

# Freeze the convolutional base so its weights are not updated during training
conv_base.trainable = False

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

model.load_weights('./output_model_file/my_model.h5')
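# Note: load_weights requires this architecture to match the one that produced
# ./output_model_file/my_model.h5 (the file saved by the earlier training run).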

# Fine-tuning; reference (CSDN): https://blog.csdn.net/nima1994/article/details/79952368
# Fine-tune the last several layers; the fine-tuning code block follows

# multiply_16
# set 'multiply_16' and following layers trainable
conv_base.trainable = True

set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'multiply_16':  # unfreeze from this layer onward
        set_trainable = True
    layer.trainable = set_trainable
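
# After toggling `trainable`, the model must be recompiled for the change to take
# effect. A minimal sketch; the lower fine-tuning learning rate is an assumption,
# not taken from the original source:
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=1e-5),
              metrics=['acc'])
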
Example 3
# Show the model architecture
model = models.Sequential()
model.add(conv_base)
model.add(layers.GlobalMaxPooling2D(name="gap"))
# model.add(layers.Flatten(name="flatten"))
if dropout_rate > 0:
    model.add(layers.Dropout(dropout_rate, name="dropout_out"))
# model.add(layers.Dense(256, activation='relu', name="fc1"))
model.add(layers.Dense(13, activation='softmax', name="fc_out"))  # 13 output classes (the "Sub" task)
model.summary()

# Show the trainable-weight count before & after freezing
print('This is the number of trainable weights '
      'before freezing the conv base:', len(model.trainable_weights))
conv_base.trainable = False  # freeze to preserve the convolutional base's weights
print('This is the number of trainable weights '
      'after freezing the conv base:', len(model.trainable_weights))  # after freezing, only the pooling and dense head remain

# Training
model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

# Create a folder for the TensorBoard logs
import os
import time

root_logdir = '/media/tohn/SSD/Sub_Efficient_USAI/my_logs'

def get_run_logdir():
    run_id = time.strftime("run_%Y_%m_%d_%H_%M_%S")
    return os.path.join(root_logdir, run_id)

run_logdir = get_run_logdir()
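
# Sketch (assumption, not from the original source): `run_logdir` is typically
# passed to a TensorBoard callback and handed to training via `callbacks`; the
# generator and step-count names are reused from the earlier examples.
from keras.callbacks import TensorBoard

tensorboard_cb = TensorBoard(log_dir=run_logdir)
history = model.fit_generator(
    train_generator,
    steps_per_epoch=NUM_TRAIN // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=NUM_TEST // batch_size,
    callbacks=[tensorboard_cb])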