def test_rpn_training(self):

        # setup
        anchors = get_anchors(anchor_scales)
        anchors_per_loc = len(anchors)
        root_dir = os.getcwd()
        ref_weights_path = os.path.join(root_dir, 'reference_rpn_weights.h5')
        tmp_weights_path = os.path.join(root_dir, 'tmp_rpn_weights.h5')

        train_images = make_image_object(os.path.join(root_dir, 'data'), codeTesting=True)
        processed_imgs, _ = resize_imgs(train_images, min_size=resize_min, max_size=resize_max)

        base_model = Models.vgg16_base(weight_regularizer=Models.WEIGHT_REGULARIZER,
                                       bias_regularizer=Models.BIAS_REGULARIZER)
        rpn_model = Models.vgg16_rpn(base_model, weight_regularizer=Models.WEIGHT_REGULARIZER,
                                     bias_regularizer=Models.BIAS_REGULARIZER, anchors_per_loc=anchors_per_loc)
        preprocess_func = Models.vgg_preprocess
        get_conv_rows_cols_func = Models.vgg_get_conv_rows_cols
        stride = Models.VGG_Stride
        training_manager = RpnTrainingManager(get_conv_rows_cols_func, stride, preprocess_func=preprocess_func,
                                              anchor_dims=anchors)

        # action being tested
        rpn_model = train_rpn(rpn_model, processed_imgs, training_manager, optimizer,
                              phases=[[1, 0.001]])
        print("Testing Done")
    def test_resnet_frcnn_training_phase_2(self):
        # setup
        anchors = get_anchors(anchor_scales=[128, 256, 512])
        anchors_per_loc = len(anchors)
        cur_dir = os.path.abspath(os.path.dirname(__file__))
        test_dir = os.path.join(cur_dir, os.pardir, 'test_data')
        base_dir = os.path.join(test_dir, 'VOC_test')
        ref_weights_path = os.path.join(test_dir, 'reference_r50_frcnn_step2_weights.h5')
        tmp_weights_path = os.path.join(test_dir, 'tmp_r50_frcnn_weights.h5')
        rpn_weights_path = os.path.join(test_dir, 'r50_rpn_step1.h5')
        img = extract_img_data(base_dir, '000005')
        training_imgs, resized_ratios = resize_imgs([img])

        model_rpn = resnet50_rpn(resnet50_base(), anchors_per_loc=anchors_per_loc)
        model_rpn.load_weights(filepath=rpn_weights_path)
        model_frcnn = resnet50_classifier(num_rois=64, num_classes=21, base_model=resnet50_base())

        class_mapping = VOC_CLASS_MAPPING
        training_manager = DetTrainingManager(rpn_model=model_rpn, class_mapping=class_mapping, num_rois=NUM_ROIS,
                                              preprocess_func=resnet.preprocess, anchor_dims=anchors)
        optimizer = Adam(lr=0.001)

        # action being tested
        train_detector_step2(detector=model_frcnn, images=training_imgs, training_manager=training_manager,
                             optimizer=optimizer, phases=[[1, 0.0001]])

        # assertion: dump the last layer's kernel and compare it against the
        # reference weights file with h5diff (exit code 0 means the files match)
        last_layer_weights = model_frcnn.get_layer('res5c_branch2c').get_weights()[0]
        with h5py.File(tmp_weights_path, 'w') as file:
            file.create_dataset('last_layer_weights', data=last_layer_weights)
        process = Popen(['h5diff', ref_weights_path, tmp_weights_path], stdout=PIPE, stderr=PIPE)
        process.communicate()
        self.assertEqual(process.returncode, 0)
def train_rpn_step1():
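    """
    Step 1 of the alternating Faster R-CNN training scheme: build the backbone
    selected by the module-level `network` setting (VGG16 or ResNet-50), train
    the RPN on top of it, and save the resulting weights and model for the
    later training steps.
    """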
    root_dir = os.getcwd()
    path = os.path.join(root_dir, 'data')
    train_images = make_image_object(path, codeTesting=False)
    print("Done making image Objects")

    anchors = get_anchors(anchor_scales)
    anchors_per_loc = len(anchors)
    processed_imgs, resized_ratios = resize_imgs(train_images,
                                                 min_size=resize_min,
                                                 max_size=resize_max)
    stride, get_conv_rows_cols_func, preprocess_func, rpn_model = None, None, None, None

    if network == "vgg16":
        base_model = Models.vgg16_base(
            weight_regularizer=Models.WEIGHT_REGULARIZER,
            bias_regularizer=Models.BIAS_REGULARIZER)
        rpn_model = Models.vgg16_rpn(
            base_model,
            weight_regularizer=Models.WEIGHT_REGULARIZER,
            bias_regularizer=Models.BIAS_REGULARIZER,
            anchors_per_loc=anchors_per_loc)
        preprocess_func = Models.vgg_preprocess
        get_conv_rows_cols_func = Models.vgg_get_conv_rows_cols
        stride = Models.VGG_Stride

    elif network == "resnet50":
        base_model = Models.resnet50_base(
            weight_regularizer=Models.WEIGHT_REGULARIZER,
            bias_regularizer=Models.BIAS_REGULARIZER)
        rpn_model = Models.resnet50_rpn(
            base_model,
            weight_regularizer=Models.WEIGHT_REGULARIZER,
            bias_regularizer=Models.BIAS_REGULARIZER,
            anchors_per_loc=anchors_per_loc)
        preprocess_func = Models.resnet50_preprocess
        get_conv_rows_cols_func = Models.resnet50_get_conv_rows_cols
        stride = Models.ResNet_Stride

    else:
        raise ValueError("Unsupported network: {}".format(network))

    save_weights_dest = "models/rpn_weights_{}_step1.h5".format(network)
    save_model_dest = "models/rpn_model_{}_step1.h5".format(network)
    training_manager = RpnTrainingManager(get_conv_rows_cols_func,
                                          stride,
                                          preprocess_func=preprocess_func,
                                          anchor_dims=anchors)
    rpn_model = train_rpn(rpn_model,
                          processed_imgs,
                          training_manager,
                          optimizer,
                          phases=phases,
                          save_frequency=2000,
                          save_weights_dest=save_weights_dest,
                          save_model_dest=save_model_dest)

    rpn_model.save_weights(save_weights_dest)
    print('Saved {} rpn weights to {}'.format(network, save_weights_dest))
    rpn_model.save(save_model_dest)
    print('Saved {} rpn model to {}'.format(network, save_model_dest))
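
# train_rpn_step1() relies on module-level configuration that is not defined in
# this excerpt. A minimal sketch of what those globals might look like; the
# concrete values and the optimizer choice are assumptions, not the author's
# settings:
#
#   from keras.optimizers import Adam
#   network = "vgg16"                       # or "resnet50"
#   anchor_scales = [128, 256, 512]         # same scales as in the tests above
#   resize_min, resize_max = 600, 1000      # typical Faster R-CNN resize bounds
#   phases = [[1, 0.001]]                   # [epochs, learning_rate] pairs, as in the tests
#   optimizer = Adam(lr=0.001)              # matches the optimizer used in the step-2 test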
    if args.network == 'vgg16':
        # don't need to worry about freezing/regularizing rpn because we're not training it
        model_rpn = vgg.rpn_from_h5(args.step3_model_path,
                                    anchors_per_loc=anchors_per_loc)
        model_det = vgg.det_from_h5(args.step4_model_path,
                                    num_classes=num_classes)
        stride = vgg.STRIDE
    else:
        model_rpn = resnet.rpn_from_h5(args.step3_model_path,
                                       anchors_per_loc=anchors_per_loc)
        model_det = resnet.det_from_h5(args.step4_model_path,
                                       num_classes=num_classes)
        stride = resnet.STRIDE
    training_manager = DetTrainingManager(rpn_model=model_rpn,
                                          class_mapping=class_mapping,
                                          preprocess_func=resnet.preprocess,
                                          anchor_dims=anchors)
    resize_min, resize_max = resize_dims_from_str(args.resize_dims)
    processed_imgs, resized_ratios = resize_imgs(test_imgs,
                                                 min_size=resize_min,
                                                 max_size=resize_max)

    dets = get_dets_by_cls(training_manager,
                           model_det,
                           resized_ratios,
                           processed_imgs,
                           stride=stride,
                           det_threshold=det_threshold)
    print(dets)
    write_dets(dets, args.out_dir)
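
# write_dets() is not shown in this excerpt. A minimal sketch of what it might
# do, assuming `dets` maps each class name to a list of
# (image_id, score, x1, y1, x2, y2) tuples and that the output follows the usual
# per-class VOC detection-file format; this is an illustration, not the
# project's actual implementation.
def write_dets_sketch(dets, out_dir):
    import os
    os.makedirs(out_dir, exist_ok=True)
    for cls_name, cls_dets in dets.items():
        out_path = os.path.join(out_dir, 'det_test_{}.txt'.format(cls_name))
        with open(out_path, 'w') as f:
            for img_id, score, x1, y1, x2, y2 in cls_dets:
                # one detection per line: image id, confidence, box corners
                f.write('{} {:.4f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
                    img_id, score, x1, y1, x2, y2))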
def train_rpn_det():
    """
        ## NOTE: Make NMS use 2k proposals at train time
        ## NOTE: DEBUGGING Script consisting of all the print statements
    """
    root_dir = os.getcwd()
    path = os.path.join(root_dir, 'data')
    train_images = make_image_object(path, codeTesting=False)
    print("Done making image Objects")

    anchors = get_anchors(anchor_scales)
    anchors_per_loc = len(anchors)
    processed_imgs, resized_ratios = resize_imgs(train_images,
                                                 min_size=resize_min,
                                                 max_size=resize_max)
    num_classes = 2
    class_mapping = {'Table': 0, 'bg': 1}

    # Create the record.csv file to record losses, acc and mAP
    record_df = pd.DataFrame(columns=[
        'mean_overlapping_bboxes', 'class_acc', 'loss_rpn_cls',
        'loss_rpn_regr', 'loss_class_cls', 'loss_class_regr', 'curr_loss',
        'mAP'
    ])

    preprocess_func = Models.vgg_preprocess
    get_conv_rows_cols_func = Models.vgg_get_conv_rows_cols
    stride = Models.VGG_Stride

    # Working with VGG only. RPN Model: input=Input(shape=(None, None, 3)), outputs=[x_class, x_regr, base_model.output]
    base_model = Models.vgg16_base(
        weight_regularizer=Models.WEIGHT_REGULARIZER,
        bias_regularizer=Models.BIAS_REGULARIZER)
    rpn_model = Models.vgg16_rpn(base_model,
                                 include_conv=False,
                                 weight_regularizer=Models.WEIGHT_REGULARIZER,
                                 bias_regularizer=Models.BIAS_REGULARIZER,
                                 anchors_per_loc=anchors_per_loc)

    # Detector Model: inputs=[base_model.input, roi_input], outputs=[out_class, out_reg]
    detector_base = Models.vgg16_base(
        weight_regularizer=Models.WEIGHT_REGULARIZER,
        bias_regularizer=Models.BIAS_REGULARIZER)
    detector_model = Models.vgg16_classifier(
        NUM_ROIS,
        num_classes,
        detector_base,
        weight_regularizer=Models.WEIGHT_REGULARIZER,
        bias_regularizer=Models.BIAS_REGULARIZER)

    # # this is a model that holds both the RPN and the classifier, used to load/save weights for the models
    # img_input = Input(shape=(None, None, 3))
    # roi_input = Input(shape=(None, 4), name='roi_input')
    # model_all = Model([img_input, roi_input], rpn_model.output[:2] + detector_model.output)

    rpn_save_weights_dest = "models/combinedTraining_rpn_weights_{}.h5".format(
        network)
    det_save_weights_dest = "models/combinedTraining_detector_weights_{}.h5".format(
        network)
    rpn_save_model_dest = "models/combinedTraining_rpn_model_{}.h5".format(
        network)
    det_save_model_dest = "models/combinedTraining_detector_model_{}.h5".format(
        network)
    record_path = "models/record.csv"

    rpn_training_manager = RpnTrainingManager(get_conv_rows_cols_func,
                                              stride,
                                              preprocess_func=preprocess_func,
                                              anchor_dims=anchors)
    det_training_manager = DetTrainingManager(rpn_model=rpn_model,
                                              class_mapping=class_mapping,
                                              preprocess_func=preprocess_func,
                                              num_rois=NUM_ROIS,
                                              stride=stride,
                                              anchor_dims=anchors)

    rpn_model, detector_model = combined_rpn_det_trainer(
        rpn_model,
        detector_model,
        processed_imgs,
        rpn_training_manager,
        det_training_manager,
        optimizer=optimizer,
        phases=phases,
        save_frequency=2000,
        rpn_save_weights_dest=rpn_save_weights_dest,
        det_save_weights_dest=det_save_weights_dest,
        recordCSV=record_df,
        record_path=record_path)

    # # Weights corresponding to minimum loss already getting saved in combined_rpn_det_trainer
    # rpn_model.save_weights(rpn_save_weights_dest)
    # print('Saved {} RPN weights to {}'.format(args.network, rpn_save_weights_dest))
    # detector_model.save_weights(det_save_weights_dest)
    # print('Saved {} DET weights to {}'.format(args.network, det_save_weights_dest))

    rpn_model.save(rpn_save_model_dest)
    print('Saved {} RPN model to {}'.format(network, rpn_save_model_dest))
    detector_model.save(det_save_model_dest)
    print('Saved {} DET model to {}'.format(network, det_save_model_dest))
    print("\n Training Complete.")

    print("Plotting Losses")
    plotLosses(record_path, r_epochs=40)
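
# plotLosses() is not defined in this excerpt. A minimal sketch of what it might
# do, assuming it reads the record.csv written during training and plots the
# loss columns with matplotlib; the column names follow record_df above, the
# rest is an assumption rather than the project's actual helper.
def plot_losses_sketch(record_path, r_epochs=40):
    import pandas as pd
    import matplotlib.pyplot as plt

    record = pd.read_csv(record_path)
    n = min(r_epochs, len(record))  # plot at most r_epochs rows
    for col in ['loss_rpn_cls', 'loss_rpn_regr', 'loss_class_cls',
                'loss_class_regr', 'curr_loss']:
        plt.plot(range(n), record[col][:n], label=col)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.savefig('training_losses.png')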