Example #1
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Sequential
import helpers
import rpn

# hyper_params, batch_size and load_weights_from_frcnn are assumed to be
# defined earlier in the script this example was taken from.
VOC_test_data, VOC_info = helpers.get_dataset("voc/2007", "test")
labels = helpers.get_labels(VOC_info)
# We add 1 class for background
hyper_params["total_labels"] = len(labels) + 1
# If you use a different dataset and don't know its max height and width values,
# you can use the calculate_max_height_width method in helpers
max_height, max_width = helpers.VOC["max_height"], helpers.VOC["max_width"]
VOC_test_data = VOC_test_data.map(lambda x : helpers.preprocessing(x, max_height, max_width))

padded_shapes, padding_values = helpers.get_padded_batch_params()
VOC_test_data = VOC_test_data.padded_batch(batch_size, padded_shapes=padded_shapes, padding_values=padding_values)

base_model = VGG16(include_top=False)
if hyper_params["stride"] == 16:
    base_model = Sequential(base_model.layers[:-1])
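# Sanity check (not in the original script): dropping the final block5_pool layer
# halves the total downsampling, so a 512x512 input should map to a 32x32 feature
# map (stride 16); with the full backbone it would be 16x16 (stride 32).
print(base_model(tf.zeros((1, 512, 512, 3))).shape)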
rpn_model = rpn.get_model(base_model, hyper_params)

frcnn_model_path = helpers.get_model_path("frcnn", hyper_params["stride"])
rpn_model_path = helpers.get_model_path("rpn", hyper_params["stride"])
model_path = frcnn_model_path if load_weights_from_frcnn else rpn_model_path
rpn_model.load_weights(model_path, by_name=True)

for image_data in VOC_test_data:
    img, gt_boxes, gt_labels = image_data
    input_img, anchors = rpn.get_step_data(image_data, hyper_params, preprocess_input, mode="inference")
    rpn_bbox_deltas, rpn_labels = rpn_model.predict_on_batch(input_img)
    # Reshape the flat RPN outputs into per-anchor bbox deltas and objectness scores
    anchors_shape = tf.shape(anchors)
    batch_size, anchor_row_size = anchors_shape[0], anchors_shape[1]
    rpn_bbox_deltas = tf.reshape(rpn_bbox_deltas, (batch_size, anchor_row_size, 4))
    rpn_labels = tf.reshape(rpn_labels, (batch_size, anchor_row_size, 1))
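    # Sketch (not this repo's helper): decode the deltas back into boxes and run
    # NMS to get region proposals. Assumes anchors and deltas are ordered
    # [y1, x1, y2, x2] and [dy, dx, dh, dw] respectively.
    anchor_h = anchors[..., 2] - anchors[..., 0]
    anchor_w = anchors[..., 3] - anchors[..., 1]
    anchor_cy = anchors[..., 0] + 0.5 * anchor_h
    anchor_cx = anchors[..., 1] + 0.5 * anchor_w
    cy = rpn_bbox_deltas[..., 0] * anchor_h + anchor_cy
    cx = rpn_bbox_deltas[..., 1] * anchor_w + anchor_cx
    h = tf.exp(rpn_bbox_deltas[..., 2]) * anchor_h
    w = tf.exp(rpn_bbox_deltas[..., 3]) * anchor_w
    proposals = tf.stack([cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w], axis=-1)
    scores = tf.squeeze(rpn_labels, -1)
    # Keep the highest scoring non-overlapping proposals for the first image in the batch
    keep = tf.image.non_max_suppression(proposals[0], scores[0], max_output_size=300, iou_threshold=0.7)
    top_proposals = tf.gather(proposals[0], keep)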
Example #2
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import Helpers
import rpn

# anchor_ratios, anchor_scales, anchor_count, stride, max_height, max_width,
# apply_padding and batch_size are assumed to be defined earlier in the
# script this example was taken from.
load_weights = False

VOC_train_data, VOC_train_data_len = Helpers.get_VOC_data("train")
VOC_val_data, VOC_val_data_len = Helpers.get_VOC_data("validation")

# Build the training and validation data feeds (key step)
rpn_train_feed = rpn.generator(VOC_train_data, anchor_ratios, anchor_scales, stride, preprocess_input, max_height=max_height, max_width=max_width, apply_padding=apply_padding)
rpn_val_feed = rpn.generator(VOC_val_data, anchor_ratios, anchor_scales, stride, preprocess_input, max_height=max_height, max_width=max_width, apply_padding=apply_padding)
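# Optional sanity check (assumption: rpn.generator returns a plain Python
# generator, as required by fit_generator below): pull one batch and inspect it.
sample_batch = next(rpn_train_feed)
print([getattr(part, "shape", type(part)) for part in sample_batch])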

base_model = VGG16(include_top=False, weights="imagenet")
if stride == 16:
    base_model = Sequential(base_model.layers[:-1])

model_path = Helpers.get_model_path(stride)
# Pretrained backbone -> RPN model with regression + classification heads
rpn_model = rpn.get_model(base_model, anchor_count)
if load_weights:
    rpn_model.load_weights(model_path)
rpn_model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.000001),
                  loss=[rpn.rpn_reg_loss, rpn.rpn_cls_loss],
                  loss_weights=[10., 1.])

# Callback setup
model_checkpoint = ModelCheckpoint(model_path, save_best_only=True, save_weights_only=True, monitor="val_loss", mode="auto")
early_stopping = EarlyStopping(monitor="val_loss", patience=20, verbose=0, mode="auto")

step_size_train = VOC_train_data_len // batch_size
step_size_val = VOC_val_data_len // batch_size
# Training
rpn_model.fit_generator(generator=rpn_train_feed,
                        steps_per_epoch=step_size_train,
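
For context, the rpn.rpn_reg_loss and rpn.rpn_cls_loss passed to compile above are the RPN's box-regression and objectness losses. The sketch below shows the usual form of these two losses (Smooth L1 / Huber for regression, binary cross-entropy for classification); the names and bodies are illustrative only and not necessarily identical to this repo's implementations.

import tensorflow as tf

def reference_rpn_reg_loss(y_true, y_pred):
    # Smooth L1 (Huber) loss on the predicted anchor box deltas. A complete
    # implementation would also mask out anchors not matched to any ground truth box.
    return tf.keras.losses.Huber(delta=1.0)(y_true, y_pred)

def reference_rpn_cls_loss(y_true, y_pred):
    # Binary cross-entropy on the per-anchor objectness scores.
    return tf.keras.losses.BinaryCrossentropy()(y_true, y_pred)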