Example #1
def compute_rpn_proposals(rpn_model, image_input, roi_input, dims_input, cfg):
    num_images = cfg["DATA"].NUM_TRAIN_IMAGES
    # Create the minibatch source
    od_minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TRAIN_MAP_FILE, cfg["DATA"].TRAIN_ROI_FILE,
        num_classes=cfg["DATA"].NUM_CLASSES,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        max_images=num_images,
        randomize=False, use_flipping=False,
        proposal_provider=None)

    # define mapping from reader streams to network inputs
    input_map = {
        od_minibatch_source.image_si: image_input,
        od_minibatch_source.roi_si: roi_input,
        od_minibatch_source.dims_si: dims_input
    }

    buffered_proposals = [None for _ in range(num_images)]
    sample_count = 0
    while sample_count < num_images:
        data = od_minibatch_source.next_minibatch(1, input_map=input_map)
        output = rpn_model.eval(data)
        out_dict = {k.name: k for k in output}
        out_rpn_rois = output[out_dict['rpn_rois']][0]
        buffered_proposals[sample_count] = np.round(out_rpn_rois).astype(np.int16)
        sample_count += 1
        if sample_count % 500 == 0:
            print("Buffered proposals for {} samples".format(sample_count))

    return buffered_proposals
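
A usage sketch: in the staged Faster R-CNN training this function belongs to, rpn_model is a network built over the same input variables that are passed in, so the loaded-from-disk route does not apply. The construction helper create_rpn_model below is hypothetical; the input-variable shapes are copied from the evaluation examples further down.

from cntk import Axis, input_variable

image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),
                             dynamic_axes=[Axis.default_batch_axis()],
                             name=cfg["MODEL"].FEATURE_NODE_NAME)
roi_input = input_variable((cfg.INPUT_ROIS_PER_IMAGE, 5),
                           dynamic_axes=[Axis.default_batch_axis()])
dims_input = input_variable((6,), dynamic_axes=[Axis.default_batch_axis()])

# create_rpn_model is hypothetical; it must expose an output named 'rpn_rois'
# and be built over these exact input variables, or eval() cannot bind them.
rpn_model = create_rpn_model(image_input, roi_input, dims_input, cfg)

# One rounded int16 proposal array per training image, in map-file order.
buffered_proposals = compute_rpn_proposals(rpn_model, image_input, roi_input, dims_input, cfg)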
Example #2
def create_train_reader(cfg):
    return ObjectDetectionMinibatchSource(
        cfg["DATA"].TRAIN_MAP_FILE,
        cfg["DATA"].TRAIN_ROI_FILE,
        num_classes=cfg["DATA"].NUM_CLASSES,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=True,
        use_flipping=cfg["TRAIN"].USE_FLIPPED,
        max_images=cfg["DATA"].NUM_TRAIN_IMAGES,
        proposal_provider=None)
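
A sketch of consuming the reader, mirroring the input_map pattern used throughout these examples; image_input, roi_input, and dims_input are assumed to be input variables shaped as in the other examples.

reader = create_train_reader(cfg)

input_map = {
    reader.image_si: image_input,
    reader.roi_si: roi_input,
    reader.dims_si: dims_input,
}
data = reader.next_minibatch(cfg.MB_SIZE, input_map=input_map)  # one training minibatch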
Example #3
def train_fast_rcnn(cfg):
    # Train only if no model exists yet
    model_path = cfg['MODEL_PATH']
    if os.path.exists(model_path) and cfg["CNTK"].MAKE_MODE:
        print("Loading existing model from %s" % model_path)
        return load_model(model_path)
    else:
        # Input variables denoting features and labeled ground truth rois (as 5-tuples per roi)
        image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT,
                                            cfg.IMAGE_WIDTH),
                                     dynamic_axes=[Axis.default_batch_axis()],
                                     name=cfg["MODEL"].FEATURE_NODE_NAME)
        roi_proposals = input_variable(
            (cfg.NUM_ROI_PROPOSALS, 4),
            dynamic_axes=[Axis.default_batch_axis()],
            name="roi_proposals")
        label_targets = input_variable(
            (cfg.NUM_ROI_PROPOSALS, cfg["DATA"].NUM_CLASSES),
            dynamic_axes=[Axis.default_batch_axis()])
        bbox_targets = input_variable(
            (cfg.NUM_ROI_PROPOSALS, 4 * cfg["DATA"].NUM_CLASSES),
            dynamic_axes=[Axis.default_batch_axis()])
        bbox_inside_weights = input_variable(
            (cfg.NUM_ROI_PROPOSALS, 4 * cfg["DATA"].NUM_CLASSES),
            dynamic_axes=[Axis.default_batch_axis()])

        # Instantiate the Fast R-CNN prediction model and loss function
        loss, pred_error = create_fast_rcnn_model(image_input, roi_proposals,
                                                  label_targets, bbox_targets,
                                                  bbox_inside_weights, cfg)
        if isinstance(loss, cntk.Variable):
            loss = combine([loss])

        if cfg["CNTK"].DEBUG_OUTPUT:
            print("Storing graphs and models to %s." % cfg.OUTPUT_PATH)
            plot(
                loss,
                os.path.join(cfg.OUTPUT_PATH,
                             "graph_frcn_train." + cfg["CNTK"].GRAPH_TYPE))

        # Set learning parameters
        lr_factor = cfg["CNTK"].LR_FACTOR
        lr_per_sample_scaled = [
            x * lr_factor for x in cfg["CNTK"].LR_PER_SAMPLE
        ]
        mm_schedule = momentum_schedule(cfg["CNTK"].MOMENTUM_PER_MB)
        l2_reg_weight = cfg["CNTK"].L2_REG_WEIGHT
        epochs_to_train = cfg["CNTK"].MAX_EPOCHS

        print("Using base model:   {}".format(cfg["MODEL"].BASE_MODEL))
        print("lr_per_sample:      {}".format(lr_per_sample_scaled))

        # --- train ---
        # Instantiate the learners and the trainer object
        params = loss.parameters
        biases = [p for p in params if '.b' in p.name or 'b' == p.name]
        others = [p for p in params if p not in biases]
        bias_lr_mult = cfg["CNTK"].BIAS_LR_MULT
        lr_schedule = learning_parameter_schedule_per_sample(
            lr_per_sample_scaled)
        learner = momentum_sgd(others,
                               lr_schedule,
                               mm_schedule,
                               l2_regularization_weight=l2_reg_weight,
                               unit_gain=False,
                               use_mean_gradient=True)

        bias_lr_per_sample = [
            v * bias_lr_mult for v in cfg["CNTK"].LR_PER_SAMPLE
        ]
        bias_lr_schedule = learning_parameter_schedule_per_sample(
            bias_lr_per_sample)
        bias_learner = momentum_sgd(biases,
                                    bias_lr_schedule,
                                    mm_schedule,
                                    l2_regularization_weight=l2_reg_weight,
                                    unit_gain=False,
                                    use_mean_gradient=True)
        trainer = Trainer(None, (loss, pred_error), [learner, bias_learner])

        # Get minibatches of images and perform model training
        print("Training model for %s epochs." % epochs_to_train)
        log_number_of_parameters(loss)

        # Create the minibatch source
        if cfg.USE_PRECOMPUTED_PROPOSALS:
            proposal_provider = ProposalProvider.fromfile(
                cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE,
                cfg.NUM_ROI_PROPOSALS)
        else:
            proposal_provider = ProposalProvider.fromconfig(cfg)

        od_minibatch_source = ObjectDetectionMinibatchSource(
            cfg["DATA"].TRAIN_MAP_FILE,
            cfg["DATA"].TRAIN_ROI_FILE,
            max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
            pad_width=cfg.IMAGE_WIDTH,
            pad_height=cfg.IMAGE_HEIGHT,
            pad_value=cfg["MODEL"].IMG_PAD_COLOR,
            randomize=True,
            use_flipping=cfg["TRAIN"].USE_FLIPPED,
            max_images=cfg["DATA"].NUM_TRAIN_IMAGES,
            num_classes=cfg["DATA"].NUM_CLASSES,
            proposal_provider=proposal_provider,
            provide_targets=True,
            proposal_iou_threshold=cfg.BBOX_THRESH,
            normalize_means=None
            if not cfg.BBOX_NORMALIZE_TARGETS else cfg.BBOX_NORMALIZE_MEANS,
            normalize_stds=None
            if not cfg.BBOX_NORMALIZE_TARGETS else cfg.BBOX_NORMALIZE_STDS)

        # define mapping from reader streams to network inputs
        input_map = {
            od_minibatch_source.image_si: image_input,
            od_minibatch_source.proposals_si: roi_proposals,
            od_minibatch_source.label_targets_si: label_targets,
            od_minibatch_source.bbox_targets_si: bbox_targets,
            od_minibatch_source.bbiw_si: bbox_inside_weights
        }

        progress_printer = ProgressPrinter(tag='Training',
                                           num_epochs=epochs_to_train,
                                           gen_heartbeat=True)
        for epoch in range(epochs_to_train):  # loop over epochs
            sample_count = 0
            while sample_count < cfg["DATA"].NUM_TRAIN_IMAGES:  # loop over minibatches in the epoch
                data = od_minibatch_source.next_minibatch(
                    min(cfg.MB_SIZE, cfg["DATA"].NUM_TRAIN_IMAGES - sample_count),
                    input_map=input_map)

                trainer.train_minibatch(data)  # update model with it
                sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
                progress_printer.update_with_trainer(
                    trainer, with_metric=True)  # log progress
                if sample_count % 100 == 0:
                    print("Processed {} samples".format(sample_count))

            progress_printer.epoch_summary(with_metric=True)

        eval_model = create_fast_rcnn_eval_model(loss, image_input,
                                                 roi_proposals, cfg)
        eval_model.save(cfg['MODEL_PATH'])
        return eval_model
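
End to end, the trainer is a single call once cfg is populated; get_configuration below is a hypothetical config factory standing in for whatever builds the cfg dictionary.

cfg = get_configuration()          # hypothetical; must supply the keys used above
eval_model = train_fast_rcnn(cfg)  # trains and saves to cfg['MODEL_PATH']
# A rerun with cfg["CNTK"].MAKE_MODE enabled loads the saved model instead of retraining.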
Example #4
def train_model(image_input, roi_input, dims_input, loss, pred_error,
                lr_per_sample, mm_schedule, l2_reg_weight, epochs_to_train, cfg,
                rpn_rois_input=None, buffered_rpn_proposals=None):
    if isinstance(loss, cntk.Variable):
        loss = combine([loss])

    params = loss.parameters
    biases = [p for p in params if '.b' in p.name or 'b' == p.name]
    others = [p for p in params if p not in biases]
    bias_lr_mult = cfg["CNTK"].BIAS_LR_MULT

    if cfg["CNTK"].DEBUG_OUTPUT:
        print("biases")
        for p in biases: print(p)
        print("others")
        for p in others: print(p)
        print("bias_lr_mult: {}".format(bias_lr_mult))

    # Instantiate the learners and the trainer object
    lr_schedule = learning_parameter_schedule_per_sample(lr_per_sample)
    learner = momentum_sgd(others, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight,
                           unit_gain=False, use_mean_gradient=True)

    bias_lr_per_sample = [v * bias_lr_mult for v in lr_per_sample]
    bias_lr_schedule = learning_parameter_schedule_per_sample(bias_lr_per_sample)
    bias_learner = momentum_sgd(biases, bias_lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight,
                                unit_gain=False, use_mean_gradient=True)
    trainer = Trainer(None, (loss, pred_error), [learner, bias_learner])

    # Get minibatches of images and perform model training
    print("Training model for %s epochs." % epochs_to_train)
    log_number_of_parameters(loss)

    # Create the minibatch source
    if buffered_rpn_proposals is not None:
        proposal_provider = ProposalProvider.fromlist(buffered_rpn_proposals, requires_scaling=False)
    else:
        proposal_provider = None

    od_minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TRAIN_MAP_FILE, cfg["DATA"].TRAIN_ROI_FILE,
        num_classes=cfg["DATA"].NUM_CLASSES,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=True,
        use_flipping=cfg["TRAIN"].USE_FLIPPED,
        max_images=cfg["DATA"].NUM_TRAIN_IMAGES,
        proposal_provider=proposal_provider)

    # define mapping from reader streams to network inputs
    input_map = {
        od_minibatch_source.image_si: image_input,
        od_minibatch_source.roi_si: roi_input,
    }
    if buffered_rpn_proposals is not None:
        input_map[od_minibatch_source.proposals_si] = rpn_rois_input
    else:
        input_map[od_minibatch_source.dims_si] = dims_input

    progress_printer = ProgressPrinter(tag='Training', num_epochs=epochs_to_train, gen_heartbeat=True)
    for epoch in range(epochs_to_train):       # loop over epochs
        sample_count = 0
        while sample_count < cfg["DATA"].NUM_TRAIN_IMAGES:  # loop over minibatches in the epoch
            data = od_minibatch_source.next_minibatch(min(cfg.MB_SIZE, cfg["DATA"].NUM_TRAIN_IMAGES-sample_count), input_map=input_map)
            trainer.train_minibatch(data)                                    # update model with it
            sample_count += trainer.previous_minibatch_sample_count          # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
            if sample_count % 100 == 0:
                print("Processed {} samples".format(sample_count))

        progress_printer.epoch_summary(with_metric=True)
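
For the buffered-proposal path, the output of compute_rpn_proposals (Example #1) feeds straight into train_model. A sketch, reusing the input variables from the Example #1 sketch and assuming loss, pred_error, and the learning-rate inputs come from model-construction code not shown here; the proposal variable's name and (NUM_ROI_PROPOSALS, 4) shape are likewise assumptions:

buffered = compute_rpn_proposals(rpn_model, image_input, roi_input, dims_input, cfg)

rpn_rois_input = input_variable((cfg.NUM_ROI_PROPOSALS, 4),
                                dynamic_axes=[Axis.default_batch_axis()],
                                name="rpn_rois")  # name and shape are assumptions

train_model(image_input, roi_input, dims_input, loss, pred_error,
            lr_per_sample, mm_schedule, l2_reg_weight, epochs_to_train, cfg,
            rpn_rois_input=rpn_rois_input, buffered_rpn_proposals=buffered)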
Example #5
def compute_test_set_aps(eval_model, cfg):
    num_test_images = cfg["DATA"].NUM_TEST_IMAGES
    classes = cfg["DATA"].CLASSES
    image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT,
                                        cfg.IMAGE_WIDTH),
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    roi_input = input_variable((cfg.INPUT_ROIS_PER_IMAGE, 5),
                               dynamic_axes=[Axis.default_batch_axis()])
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    frcn_eval = eval_model(image_input, dims_input)

    # Create the minibatch source
    minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TEST_MAP_FILE,
        cfg["DATA"].TEST_ROI_FILE,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=False,
        use_flipping=False,
        max_images=cfg["DATA"].NUM_TEST_IMAGES,
        num_classes=cfg["DATA"].NUM_CLASSES,
        proposal_provider=None)

    # define mapping from reader streams to network inputs
    input_map = {
        minibatch_source.image_si: image_input,
        minibatch_source.roi_si: roi_input,
        minibatch_source.dims_si: dims_input
    }

    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_test_images)]
                 for _ in range(cfg["DATA"].NUM_CLASSES)]

    # evaluate test images and write network output to file
    print("Evaluating Faster R-CNN model for %s images." % num_test_images)
    all_gt_infos = {key: [] for key in classes}
    for img_i in range(0, num_test_images):
        mb_data = minibatch_source.next_minibatch(1, input_map=input_map)

        gt_row = mb_data[roi_input].asarray()
        gt_row = gt_row.reshape((cfg.INPUT_ROIS_PER_IMAGE, 5))
        all_gt_boxes = gt_row[np.where(gt_row[:, -1] > 0)]

        for cls_index, cls_name in enumerate(classes):
            if cls_index == 0: continue
            cls_gt_boxes = all_gt_boxes[np.where(
                all_gt_boxes[:, -1] == cls_index)]
            all_gt_infos[cls_name].append({
                'bbox':
                np.array(cls_gt_boxes),
                'difficult': [False] * len(cls_gt_boxes),
                'det': [False] * len(cls_gt_boxes)
            })

        output = frcn_eval.eval({
            image_input: mb_data[image_input],
            dims_input: mb_data[dims_input]
        })
        out_dict = {k.name: k for k in output}
        out_cls_pred = output[out_dict['cls_pred']][0]
        out_rpn_rois = output[out_dict['rpn_rois']][0]
        out_bbox_regr = output[out_dict['bbox_regr']][0]

        labels = out_cls_pred.argmax(axis=1)
        scores = out_cls_pred.max(axis=1)
        regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels,
                                      mb_data[dims_input].asarray())

        labels.shape = labels.shape + (1, )
        scores.shape = scores.shape + (1, )
        coords_score_label = np.hstack((regressed_rois, scores, labels))

        #   shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
        for cls_j in range(1, cfg["DATA"].NUM_CLASSES):
            coords_score_label_for_cls = coords_score_label[np.where(
                coords_score_label[:, -1] == cls_j)]
            all_boxes[cls_j][
                img_i] = coords_score_label_for_cls[:, :-1].astype(np.float32,
                                                                   copy=False)

        if (img_i + 1) % 100 == 0:
            print("Processed {} samples".format(img_i + 1))

    # calculate mAP
    aps = evaluate_detections(all_boxes,
                              all_gt_infos,
                              classes,
                              use_gpu_nms=cfg.USE_GPU_NMS,
                              device_id=cfg.GPU_ID,
                              nms_threshold=cfg.RESULTS_NMS_THRESHOLD,
                              conf_threshold=cfg.RESULTS_NMS_CONF_THRESHOLD)

    return aps
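
A usage sketch for the returned APs; assuming aps maps class names to average-precision values (as evaluate_detections in the CNTK detection utilities returns), mean AP is just the average over classes:

import numpy as np

aps = compute_test_set_aps(eval_model, cfg)

for class_name in aps:
    print('AP for {:>15} = {:.4f}'.format(class_name, aps[class_name]))
print('Mean AP = {:.4f}'.format(np.nanmean(list(aps.values()))))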
Example #6
def train_fast_rcnn(cfg):
    # Train only if no model exists yet
    model_path = cfg['MODEL_PATH']
    if os.path.exists(model_path) and cfg["CNTK"].MAKE_MODE:
        print("Loading existing model from %s" % model_path)
        return load_model(model_path)
    else:
        # Input variables denoting features and labeled ground truth rois (as 5-tuples per roi)
        image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),
                                     dynamic_axes=[Axis.default_batch_axis()],
                                     name=cfg["MODEL"].FEATURE_NODE_NAME)
        roi_proposals = input_variable((cfg.NUM_ROI_PROPOSALS, 4), dynamic_axes=[Axis.default_batch_axis()], name="roi_proposals")
        label_targets = input_variable((cfg.NUM_ROI_PROPOSALS, cfg["DATA"].NUM_CLASSES), dynamic_axes=[Axis.default_batch_axis()])
        bbox_targets = input_variable((cfg.NUM_ROI_PROPOSALS, 4*cfg["DATA"].NUM_CLASSES), dynamic_axes=[Axis.default_batch_axis()])
        bbox_inside_weights = input_variable((cfg.NUM_ROI_PROPOSALS, 4*cfg["DATA"].NUM_CLASSES), dynamic_axes=[Axis.default_batch_axis()])

        # Instantiate the Fast R-CNN prediction model and loss function
        loss, pred_error = create_fast_rcnn_model(image_input, roi_proposals, label_targets, bbox_targets, bbox_inside_weights, cfg)
        if isinstance(loss, cntk.Variable):
            loss = combine([loss])

        if cfg["CNTK"].DEBUG_OUTPUT:
            print("Storing graphs and models to %s." % cfg.OUTPUT_PATH)
            plot(loss, os.path.join(cfg.OUTPUT_PATH, "graph_frcn_train." + cfg["CNTK"].GRAPH_TYPE))

        # Set learning parameters
        lr_factor = cfg["CNTK"].LR_FACTOR
        lr_per_sample_scaled = [x * lr_factor for x in cfg["CNTK"].LR_PER_SAMPLE]
        mm_schedule = momentum_schedule(cfg["CNTK"].MOMENTUM_PER_MB)
        l2_reg_weight = cfg["CNTK"].L2_REG_WEIGHT
        epochs_to_train = cfg["CNTK"].MAX_EPOCHS

        print("Using base model:   {}".format(cfg["MODEL"].BASE_MODEL))
        print("lr_per_sample:      {}".format(lr_per_sample_scaled))

        # --- train ---
        # Instantiate the learners and the trainer object
        params = loss.parameters
        biases = [p for p in params if '.b' in p.name or 'b' == p.name]
        others = [p for p in params if p not in biases]
        bias_lr_mult = cfg["CNTK"].BIAS_LR_MULT
        lr_schedule = learning_rate_schedule(lr_per_sample_scaled, unit=UnitType.sample)
        learner = momentum_sgd(others, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight, unit_gain=False, use_mean_gradient=True)

        bias_lr_per_sample = [v * bias_lr_mult for v in cfg["CNTK"].LR_PER_SAMPLE]
        bias_lr_schedule = learning_rate_schedule(bias_lr_per_sample, unit=UnitType.sample)
        bias_learner = momentum_sgd(biases, bias_lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight, unit_gain=False, use_mean_gradient=True)
        trainer = Trainer(None, (loss, pred_error), [learner, bias_learner])

        # Get minibatches of images and perform model training
        print("Training model for %s epochs." % epochs_to_train)
        log_number_of_parameters(loss)

        # Create the minibatch source
        if cfg.USE_PRECOMPUTED_PROPOSALS:
            proposal_provider = ProposalProvider.fromfile(cfg["DATA"].TRAIN_PRECOMPUTED_PROPOSALS_FILE, cfg.NUM_ROI_PROPOSALS)
        else:
            proposal_provider = ProposalProvider.fromconfig(cfg)

        od_minibatch_source = ObjectDetectionMinibatchSource(
            cfg["DATA"].TRAIN_MAP_FILE, cfg["DATA"].TRAIN_ROI_FILE,
            max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
            pad_width=cfg.IMAGE_WIDTH,
            pad_height=cfg.IMAGE_HEIGHT,
            pad_value=cfg["MODEL"].IMG_PAD_COLOR,
            randomize=True,
            use_flipping=cfg["TRAIN"].USE_FLIPPED,
            max_images=cfg["DATA"].NUM_TRAIN_IMAGES,
            num_classes=cfg["DATA"].NUM_CLASSES,
            proposal_provider=proposal_provider,
            provide_targets=True,
            proposal_iou_threshold=cfg.BBOX_THRESH,
            normalize_means=None if not cfg.BBOX_NORMALIZE_TARGETS else cfg.BBOX_NORMALIZE_MEANS,
            normalize_stds=None if not cfg.BBOX_NORMALIZE_TARGETS else cfg.BBOX_NORMALIZE_STDS)

        # define mapping from reader streams to network inputs
        input_map = {
            od_minibatch_source.image_si: image_input,
            od_minibatch_source.proposals_si: roi_proposals,
            od_minibatch_source.label_targets_si: label_targets,
            od_minibatch_source.bbox_targets_si: bbox_targets,
            od_minibatch_source.bbiw_si: bbox_inside_weights
        }

        progress_printer = ProgressPrinter(tag='Training', num_epochs=epochs_to_train, gen_heartbeat=True)
        for epoch in range(epochs_to_train):  # loop over epochs
            sample_count = 0
            while sample_count < cfg["DATA"].NUM_TRAIN_IMAGES:  # loop over minibatches in the epoch
                data = od_minibatch_source.next_minibatch(min(cfg.MB_SIZE, cfg["DATA"].NUM_TRAIN_IMAGES - sample_count), input_map=input_map)

                trainer.train_minibatch(data)  # update model with it
                sample_count += trainer.previous_minibatch_sample_count  # count samples processed so far
                progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
                if sample_count % 100 == 0:
                    print("Processed {} samples".format(sample_count))

            progress_printer.epoch_summary(with_metric=True)

        eval_model = create_fast_rcnn_eval_model(loss, image_input, roi_proposals, cfg)
        eval_model.save(cfg['MODEL_PATH'])
        return eval_model
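
Note that this variant builds its schedules with the older learning_rate_schedule(..., unit=UnitType.sample) API, where the other training examples call learning_parameter_schedule_per_sample. On CNTK 2.2 and later the two spellings should be interchangeable, though that is worth verifying against the installed version:

# legacy spelling (deprecated in newer CNTK releases)
lr_schedule = learning_rate_schedule(lr_per_sample_scaled, unit=UnitType.sample)
# current spelling used by the other examples
lr_schedule = learning_parameter_schedule_per_sample(lr_per_sample_scaled)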
Example #7
def train_model(image_input, roi_input, dims_input, loss, pred_error,
                lr_per_sample, mm_schedule, l2_reg_weight, epochs_to_train, cfg,
                rpn_rois_input=None, buffered_rpn_proposals=None):
    if isinstance(loss, cntk.Variable):
        loss = combine([loss])

    params = loss.parameters
    biases = [p for p in params if '.b' in p.name or 'b' == p.name]
    others = [p for p in params if p not in biases]
    bias_lr_mult = cfg["CNTK"].BIAS_LR_MULT

    if cfg["CNTK"].DEBUG_OUTPUT:
        print("biases")
        for p in biases: print(p)
        print("others")
        for p in others: print(p)
        print("bias_lr_mult: {}".format(bias_lr_mult))

    # Instantiate the learners and the trainer object
    lr_schedule = learning_parameter_schedule_per_sample(lr_per_sample)
    learner = momentum_sgd(others, lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight,
                           unit_gain=False, use_mean_gradient=True)

    bias_lr_per_sample = [v * bias_lr_mult for v in lr_per_sample]
    bias_lr_schedule = learning_parameter_schedule_per_sample(bias_lr_per_sample)
    bias_learner = momentum_sgd(biases, bias_lr_schedule, mm_schedule, l2_regularization_weight=l2_reg_weight,
                                unit_gain=False, use_mean_gradient=True)
    trainer = Trainer(None, (loss, pred_error), [learner, bias_learner])

    # Get minibatches of images and perform model training
    print("Training model for %s epochs." % epochs_to_train)
    log_number_of_parameters(loss)

    # Create the minibatch source
    if buffered_rpn_proposals is not None:
        proposal_provider = ProposalProvider.fromlist(buffered_rpn_proposals, requires_scaling=False)
    else:
        proposal_provider = None

    od_minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TRAIN_MAP_FILE, cfg["DATA"].TRAIN_ROI_FILE,
        num_classes=cfg["DATA"].NUM_CLASSES,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=True,
        use_flipping=cfg["TRAIN"].USE_FLIPPED,
        max_images=cfg["DATA"].NUM_TRAIN_IMAGES,
        proposal_provider=proposal_provider)

    # define mapping from reader streams to network inputs
    input_map = {
        od_minibatch_source.image_si: image_input,
        od_minibatch_source.roi_si: roi_input,
    }
    if buffered_rpn_proposals is not None:
        input_map[od_minibatch_source.proposals_si] = rpn_rois_input
    else:
        input_map[od_minibatch_source.dims_si] = dims_input

    progress_printer = ProgressPrinter(tag='Training', num_epochs=epochs_to_train, gen_heartbeat=True)
    for epoch in range(epochs_to_train):       # loop over epochs
        sample_count = 0
        while sample_count < cfg["DATA"].NUM_TRAIN_IMAGES:  # loop over minibatches in the epoch
            data = od_minibatch_source.next_minibatch(min(cfg.MB_SIZE, cfg["DATA"].NUM_TRAIN_IMAGES-sample_count), input_map=input_map)
            trainer.train_minibatch(data)                                    # update model with it
            sample_count += trainer.previous_minibatch_sample_count          # count samples processed so far
            progress_printer.update_with_trainer(trainer, with_metric=True)  # log progress
            if sample_count % 100 == 0:
                print("Processed {} samples".format(sample_count))

        progress_printer.epoch_summary(with_metric=True)
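
Splitting the parameters into biases and everything else mirrors the Caffe-style R-CNN convention of training biases with a larger step size (BIAS_LR_MULT, typically 2); note that, unlike the usual Caffe setup, the L2 regularization weight is applied to both parameter groups here.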
Example #8
def compute_test_set_aps(eval_model, cfg):
    num_test_images = cfg["DATA"].NUM_TEST_IMAGES
    classes = cfg["DATA"].CLASSES
    image_input = input_variable(shape=(cfg.NUM_CHANNELS, cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH),
                                 dynamic_axes=[Axis.default_batch_axis()],
                                 name=cfg["MODEL"].FEATURE_NODE_NAME)
    roi_input = input_variable((cfg.INPUT_ROIS_PER_IMAGE, 5), dynamic_axes=[Axis.default_batch_axis()])
    roi_proposals = input_variable((cfg.NUM_ROI_PROPOSALS, 4), dynamic_axes=[Axis.default_batch_axis()], name="roi_proposals")
    dims_input = input_variable((6), dynamic_axes=[Axis.default_batch_axis()])
    frcn_eval = eval_model(image_input, roi_proposals)

    # Create the minibatch source
    if cfg.USE_PRECOMPUTED_PROPOSALS:
        try:
            cfg["DATA"].TEST_PRECOMPUTED_PROPOSALS_FILE = os.path.join(cfg["DATA"].MAP_FILE_PATH, cfg["DATA"].TEST_PRECOMPUTED_PROPOSALS_FILE)
            proposal_provider = ProposalProvider.fromfile(cfg["DATA"].TEST_PRECOMPUTED_PROPOSALS_FILE, cfg.NUM_ROI_PROPOSALS)
        except Exception:
            print("To use precomputed proposals please specify the following parameters in your configuration:\n"
                  "__C.DATA.TRAIN_PRECOMPUTED_PROPOSALS_FILE\n"
                  "__C.DATA.TEST_PRECOMPUTED_PROPOSALS_FILE")
            exit(-1)
    else:
        proposal_provider = ProposalProvider.fromconfig(cfg)

    minibatch_source = ObjectDetectionMinibatchSource(
        cfg["DATA"].TEST_MAP_FILE,
        cfg["DATA"].TEST_ROI_FILE,
        max_annotations_per_image=cfg.INPUT_ROIS_PER_IMAGE,
        pad_width=cfg.IMAGE_WIDTH,
        pad_height=cfg.IMAGE_HEIGHT,
        pad_value=cfg["MODEL"].IMG_PAD_COLOR,
        randomize=False, use_flipping=False,
        max_images=cfg["DATA"].NUM_TEST_IMAGES,
        num_classes=cfg["DATA"].NUM_CLASSES,
        proposal_provider=proposal_provider,
        provide_targets=False)

    # define mapping from reader streams to network inputs
    input_map = {
        minibatch_source.image_si: image_input,
        minibatch_source.roi_si: roi_input,
        minibatch_source.proposals_si: roi_proposals,
        minibatch_source.dims_si: dims_input
    }

    # all detections are collected into:
    #    all_boxes[cls][image] = N x 5 array of detections in (x1, y1, x2, y2, score)
    all_boxes = [[[] for _ in range(num_test_images)] for _ in range(cfg["DATA"].NUM_CLASSES)]

    # evaluate test images and write network output to file
    print("Evaluating Fast R-CNN model for %s images." % num_test_images)
    all_gt_infos = {key: [] for key in classes}
    for img_i in range(0, num_test_images):
        mb_data = minibatch_source.next_minibatch(1, input_map=input_map)

        gt_row = mb_data[roi_input].asarray()
        gt_row = gt_row.reshape((cfg.INPUT_ROIS_PER_IMAGE, 5))
        all_gt_boxes = gt_row[np.where(gt_row[:,-1] > 0)]

        for cls_index, cls_name in enumerate(classes):
            if cls_index == 0: continue
            cls_gt_boxes = all_gt_boxes[np.where(all_gt_boxes[:,-1] == cls_index)]
            all_gt_infos[cls_name].append({'bbox': np.array(cls_gt_boxes),
                                           'difficult': [False] * len(cls_gt_boxes),
                                           'det': [False] * len(cls_gt_boxes)})

        output = frcn_eval.eval({image_input: mb_data[image_input], roi_proposals: mb_data[roi_proposals]})
        out_dict = {k.name: k for k in output}
        out_cls_pred = output[out_dict['cls_pred']][0]
        out_rpn_rois = mb_data[roi_proposals].data.asarray()
        out_bbox_regr = output[out_dict['bbox_regr']][0]

        labels = out_cls_pred.argmax(axis=1)
        scores = out_cls_pred.max(axis=1)
        regressed_rois = regress_rois(out_rpn_rois, out_bbox_regr, labels, mb_data[dims_input].asarray())

        labels.shape = labels.shape + (1,)
        scores.shape = scores.shape + (1,)
        coords_score_label = np.hstack((regressed_rois, scores, labels))

        #   shape of all_boxes: e.g. 21 classes x 4952 images x 58 rois x 5 coords+score
        for cls_j in range(1, cfg["DATA"].NUM_CLASSES):
            coords_score_label_for_cls = coords_score_label[np.where(coords_score_label[:,-1] == cls_j)]
            all_boxes[cls_j][img_i] = coords_score_label_for_cls[:,:-1].astype(np.float32, copy=False)

        if (img_i+1) % 100 == 0:
            print("Processed {} samples".format(img_i+1))

    # calculate mAP
    aps = evaluate_detections(all_boxes, all_gt_infos, classes,
                              use_gpu_nms=cfg.USE_GPU_NMS,
                              device_id=cfg.GPU_ID,
                              nms_threshold=cfg.RESULTS_NMS_THRESHOLD,
                              conf_threshold=cfg.RESULTS_NMS_CONF_THRESHOLD)

    return aps
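
The two evaluation variants differ mainly in where the proposals come from: Example #5 evaluates a Faster R-CNN model whose own rpn_rois output supplies the boxes, while this Fast R-CNN variant feeds proposals in through the reader's proposals_si stream and reads them back from mb_data for the bounding-box regression step.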