Code Example #1
import os

import cv2
import numpy as np
from keras import backend as K

# Project helpers (format_img, rpn_to_roi, apply_regr,
# non_max_suppression_fast, get_real_coordinates,
# draw_boxes_and_label_on_image_cv2) and an open results file handle `f`
# are assumed to be in scope.
def predict_single_image(img_path, model_rpn, model_classifier_only,
                         class_mapping):
    img = cv2.imread(img_path)
    if img is None:
        print('reading image failed.')
        return

    X, ratio = format_img(img)
    if K.image_dim_ordering() == 'tf':
        X = np.transpose(X, (0, 2, 3, 1))
    # get the feature maps and output from the RPN
    [Y1, Y2, F] = model_rpn.predict(X)

    result = rpn_to_roi(Y1, Y2, K.image_dim_ordering(), overlap_thresh=0.7)

    # convert from (x1,y1,x2,y2) to (x,y,w,h)
    result[:, 2] -= result[:, 0]
    result[:, 3] -= result[:, 1]
    bbox_threshold = 0.7

    # apply the spatial pyramid pooling to the proposed regions,
    # processing them in batches of 32 ROIs (the classifier's fixed num_rois)
    boxes = dict()
    for jk in range(result.shape[0] // 32 + 1):
        rois = np.expand_dims(result[32 * jk:32 * (jk + 1), :], axis=0)
        if rois.shape[1] == 0:
            break
        if jk == result.shape[0] // 32:
            # the final batch may be short: pad it to 32 ROIs by repeating
            # the first ROI so the classifier input shape stays fixed
            curr_shape = rois.shape
            target_shape = (curr_shape[0], 32, curr_shape[2])
            rois_padded = np.zeros(target_shape).astype(rois.dtype)
            rois_padded[:, :curr_shape[1], :] = rois
            rois_padded[0, curr_shape[1]:, :] = rois[0, 0, :]
            rois = rois_padded

        [p_cls, p_regr] = model_classifier_only.predict([F, rois])

        for ii in range(p_cls.shape[1]):
            if np.max(p_cls[0, ii, :]) < bbox_threshold or np.argmax(
                    p_cls[0, ii, :]) == (p_cls.shape[2] - 1):
                continue

            cls_num = np.argmax(p_cls[0, ii, :])
            if cls_num not in boxes:
                boxes[cls_num] = []
            (x, y, w, h) = rois[0, ii, :]
            try:
                (tx, ty, tw, th) = p_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                # undo the scaling applied to the regression targets
                tx /= 8.0
                ty /= 8.0
                tw /= 4.0
                th /= 4.0
                x, y, w, h = apply_regr(x, y, w, h, tx, ty, tw, th)
            except Exception as e:
                # keep the raw ROI if applying the regression fails
                print(e)
            # map feature-map coordinates back to the resized image
            # (rpn_stride = 16)
            boxes[cls_num].append([
                16 * x, 16 * y, 16 * (x + w), 16 * (y + h),
                np.max(p_cls[0, ii, :])
            ])

    # apply non-max suppression to prune overlapping boxes per class
    for cls_num, box in boxes.items():
        boxes_nms = non_max_suppression_fast(box, overlap_thresh=0.5)
        boxes[cls_num] = boxes_nms
        for b in boxes_nms:
            # map back to the original image size and write one CSV row
            # per detection to the results file handle `f`
            b[0], b[1], b[2], b[3] = get_real_coordinates(
                ratio, b[0], b[1], b[2], b[3])
            f.write(",".join([
                img_path.split("/")[-1].split(".")[0], class_mapping[cls_num],
                str(b[-1]),
                str(b[0]),
                str(b[1]),
                str(b[2]),
                str(b[3])
            ]) + "\n")

    img = draw_boxes_and_label_on_image_cv2(img, class_mapping, boxes)
    result_path = './resnet_aug_results_images/{}.jpg'.format(
        os.path.basename(img_path).split('.')[0])
    cv2.imwrite(result_path, img)
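
A minimal usage sketch for predict_single_image, assuming the RPN/classifier models and class_mapping were built and loaded elsewhere in the project, and that `f` is the open results file the function writes to (the paths here are hypothetical):

import glob

# hypothetical driver; model_rpn, model_classifier_only and class_mapping
# are assumed to come from the project's model-building code
f = open('./results.csv', 'w')
for img_path in glob.glob('./test_images/*.jpg'):
    predict_single_image(img_path, model_rpn, model_classifier_only,
                         class_mapping)
f.close()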
Code Example #2
                    rpn_accuracy_rpn_monitor = []
                    print(
                        'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'
                        .format(mean_overlapping_bboxes, epoch_length))
                    if mean_overlapping_bboxes == 0:
                        print(
                            'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.'
                        )

                X, Y, img_data = next(data_gen_train)
                loss_rpn = model_rpn.train_on_batch(X, Y)
                P_rpn = model_rpn.predict_on_batch(X)
                R = roi_helpers.rpn_to_roi(P_rpn[0],
                                           P_rpn[1],
                                           C,
                                           K.image_dim_ordering(),
                                           use_regr=True,
                                           overlap_thresh=0.7,
                                           max_boxes=300)

                # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
                X2, Y1, Y2 = roi_helpers.calc_iou(R, img_data, C,
                                                  class_mapping)

                # the last column of Y1 marks background ROIs
                # (1 = negative/background, 0 = positive). np.where returns
                # a tuple, so take the index array itself.
                neg_samples = np.where(Y1[0, :, -1] == 1)[0]
                pos_samples = np.where(Y1[0, :, -1] == 0)[0]
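
For context, in the standard keras-frcnn training loop that this excerpt follows, mean_overlapping_bboxes is the running mean of how many RPN proposals matched ground truth per image over the last epoch_length iterations. A sketch of that bookkeeping (an assumption based on that common pattern, not shown in the excerpt):

# rpn_accuracy_rpn_monitor collects, per image, the number of positive
# ROIs that calc_iou matched to the ground truth
rpn_accuracy_rpn_monitor.append(len(pos_samples))

if len(rpn_accuracy_rpn_monitor) == epoch_length:
    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(
        rpn_accuracy_rpn_monitor)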
Code Example #3
def detect_predict(pic,
                   C,
                   model_rpn,
                   model_classifier,
                   model_classifier_only,
                   class_mapping,
                   class_to_color,
                   print_dets=False):
    """
    Detect and predict object in the picture
    :param pic: picture numpy array
    :param C: config object
    :params model_*: models from get_models function
    :params class_*: mapping and colors, need to be loaded to keep the same colors/classes 
    :return: picture with bounding boxes 
    """
    img = pic
    # confidence and NMS thresholds; these are not defined in the original
    # excerpt, so defaults matching Code Example #1 are assumed here
    bbox_threshold = 0.7
    overlap_thresh = 0.5

    X, ratio = format_img(img, C)

    # undo format_img's preprocessing for display: reorder the channels
    # and add back the ImageNet per-channel means
    img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy()
    img_scaled[:, :, 0] += 123.68
    img_scaled[:, :, 1] += 116.779
    img_scaled[:, :, 2] += 103.939
    img_scaled = img_scaled.astype(np.uint8)

    if K.image_dim_ordering() == 'tf':
        X = np.transpose(X, (0, 2, 3, 1))

    # get the feature maps and output from the RPN
    [Y1, Y2, F] = model_rpn.predict(X)

    R = roi_helpers.rpn_to_roi(Y1,
                               Y2,
                               C,
                               K.image_dim_ordering(),
                               overlap_thresh=0.7)

    # convert from (x1,y1,x2,y2) to (x,y,w,h)
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]

    # apply the spatial pyramid pooling to the proposed regions
    bboxes = {}
    probs = {}
    # print(class_mapping)
    for jk in range(R.shape[0] // C.num_rois + 1):
        ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :],
                              axis=0)
        if ROIs.shape[1] == 0:
            break

        if jk == R.shape[0] // C.num_rois:
            # the final batch may be short: pad it to num_rois by repeating
            # the first ROI so the classifier input shape stays fixed
            curr_shape = ROIs.shape
            target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
            ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
            ROIs_padded[:, :curr_shape[1], :] = ROIs
            ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
            ROIs = ROIs_padded

        [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])

        for ii in range(P_cls.shape[1]):

            if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(
                    P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                continue

            cls_num = np.argmax(P_cls[0, ii, :])
            cls_name = class_mapping[cls_num]

            if cls_name not in bboxes:
                bboxes[cls_name] = []
                probs[cls_name] = []

            (x, y, w, h) = ROIs[0, ii, :]

            try:
                (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                # undo the scaling applied to the regression targets
                tx /= C.classifier_regr_std[0]
                ty /= C.classifier_regr_std[1]
                tw /= C.classifier_regr_std[2]
                th /= C.classifier_regr_std[3]
                x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
            except Exception:
                # keep the raw ROI if applying the regression fails
                pass
            bboxes[cls_name].append([
                C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w),
                C.rpn_stride * (y + h)
            ])
            probs[cls_name].append(np.max(P_cls[0, ii, :]))

    all_dets = []
    for key in bboxes:
        bbox = np.array(bboxes[key])

        new_boxes, new_probs = roi_helpers.non_max_suppression_fast(
            bbox, np.array(probs[key]), overlap_thresh=overlap_thresh)
        # only the single highest-scoring box per class is kept and drawn
        jk = np.argmax(new_probs)
        if new_probs[jk] > 0.55:
            (x1, y1, x2, y2) = new_boxes[jk, :]

            (real_x1, real_y1, real_x2,
             real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)

            cv2.rectangle(
                img, (real_x1, real_y1), (real_x2, real_y2),
                (int(class_to_color[key][0]), int(
                    class_to_color[key][1]), int(class_to_color[key][2])), 2)

            textLabel = '{}: {}%'.format(key, int(100 * new_probs[jk]))
            all_dets.append((key, 100 * new_probs[jk]))

            (retval, baseLine) = cv2.getTextSize(textLabel,
                                                 cv2.FONT_HERSHEY_COMPLEX, 1,
                                                 1)

            # avoid putting the text outside the frame: move the label
            # if the box touches the top or bottom of the image
            if real_y1 < 20 and real_y2 < img.shape[0]:
                textOrg = (real_x1, real_y2 + 5)
            elif real_y1 < 20 and real_y2 > img.shape[0]:
                textOrg = (real_x1, img.shape[0] - 10)
            else:
                textOrg = (real_x1, real_y1 + 5)

            cv2.rectangle(
                img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5),
                (0, 0, 0), 2)
            cv2.rectangle(
                img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5),
                (255, 255, 255), -1)
            cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1,
                        (0, 0, 0), 1)

    if print_dets:
        print(all_dets)
    return img
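
A minimal usage sketch for detect_predict, assuming C, the models, class_mapping and class_to_color come from the project's get_models/config loading (the image paths here are hypothetical):

import cv2

# hypothetical driver for detect_predict
pic = cv2.imread('street.jpg')
out = detect_predict(pic, C, model_rpn, model_classifier,
                     model_classifier_only, class_mapping, class_to_color,
                     print_dets=True)
cv2.imwrite('street_out.jpg', out)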
Code Example #4
	print(img_name)

	st = time.time()
	filepath = os.path.join(test_base_path, img_name)

	img = cv2.imread(filepath)

	X, ratio = format_img(img, C)
	X = np.transpose(X, (0, 2, 3, 1))

	# Getting feature-maps F & output layer Y1, Y2 from  RPN
	[Y1, Y2, F] = model_rpn.predict(X)
	
	# Getting boxes by applying NMS -> R.shape = (300, 4)
	R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh = 0.7)

	# Conversion from (x1,y1,x2,y2) to (x,y,w,h)
	R[:, 2] -= R[:, 0]
	R[:, 3] -= R[:, 1]

	# Spatial pyramid pooling to proposed regions
	bboxes = {}
	probs = {}

	for jk in range(R.shape[0] // C.num_rois + 1):
		ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis = 0)
		if ROIs.shape[1] == 0:
			break

		if jk == R.shape[0] // C.num_rois:
			# pad the final short batch by repeating the first ROI,
			# mirroring the parallel code in the examples above
			curr_shape = ROIs.shape
			target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
			ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
			ROIs_padded[:, :curr_shape[1], :] = ROIs
			ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
			ROIs = ROIs_padded
Code Example #5
# numpy, Keras (Input, Model, Adam, backend as K), random.shuffle/choice and
# the project helpers (parse_data, parse_mapping, get_anchor_gt, rn,
# losses_fn, rpn_to_roi, calc_iou) are assumed to be in scope for this excerpt.
def train():

    num_rois = 32
    epochs = 250
    model = "model_trained/frcnn_resnet.hdf5"
    ground_truth = "MIO-TCD-Localization/gt_train.csv"
    pretrained_model = "model/resnet50_weights_tf_dim_ordering_tf_kernels.h5"

    # load filenames and class mappings
    images, label_dict = parse_data(ground_truth)
    map1 = parse_mapping(ground_truth)

    # shuffle the data
    shuffle(images)

    # create train and validation data split
    train_data = []
    val_data = []

    # randomly assign roughly 1/6 of the images to validation
    for image in images:
        if np.random.randint(0, 6) == 0:
            val_data.append(image)
        else:
            train_data.append(image)

    print("Num train samples", len(train_data))
    print("Num val samples", len(val_data))

    data_gen_train = get_anchor_gt(train_data,
                                   label_dict,
                                   rn.get_img_output_length,
                                   K.image_dim_ordering(),
                                   mode='train')
    data_gen_val = get_anchor_gt(val_data,
                                 label_dict,
                                 rn.get_img_output_length,
                                 K.image_dim_ordering(),
                                 mode='val')

    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
    else:
        input_shape_img = (None, None, 3)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(None, 4))

    # define the base network
    shared_layers = rn.nn_base(img_input, trainable=True)

    # define the RPN using the base layers
    num_anchors = 3 * 3  # 3 scales x 3 aspect ratios
    rpn = rn.rpn(shared_layers, num_anchors)

    classifier = rn.classifier(shared_layers,
                               roi_input,
                               num_rois,
                               nb_classes=len(label_dict),
                               trainable=True)

    model_rpn = Model(img_input, rpn[:2])
    model_classifier = Model([img_input, roi_input], classifier)

    # this model holds both the RPN and the classifier
    model_all = Model([img_input, roi_input], rpn[:2] + classifier)

    print("Loading pretrained weights from", pretrained_model)
    model_rpn.load_weights(pretrained_model, by_name=True)
    model_classifier.load_weights(pretrained_model, by_name=True)

    optimizer = Adam(lr=1e-5)
    optimizer_classifier = Adam(lr=1e-5)
    model_rpn.compile(optimizer=optimizer,
                      loss=[
                          losses_fn.rpn_loss_cls(num_anchors),
                          losses_fn.rpn_loss_regr(num_anchors)
                      ])
    model_classifier.compile(
        optimizer=optimizer_classifier,
        loss=[
            losses_fn.class_loss_cls,
            losses_fn.class_loss_regr(len(label_dict) - 1)
        ],
        metrics={'dense_class_{}'.format(len(label_dict)): 'accuracy'})
    # model_all is compiled only so the combined weights can be saved;
    # the 'mae' loss is a placeholder and is never used for training
    model_all.compile(optimizer='sgd', loss='mae')

    epoch_length = 250
    losses = np.zeros((epoch_length, 5))
    best_loss = np.inf
    iteration = 0

    for epoch in range(epochs):
        print("Epoch ", epoch + 1)

        # Training on the train dataset. Note that only the classifier head
        # is updated in this loop; the RPN is used for inference only
        # (compare Code Example #2, which calls model_rpn.train_on_batch).

        X, Y, img_data = next(data_gen_train)
        P_rpn = model_rpn.predict_on_batch(X)
        result = rpn_to_roi(P_rpn[0], P_rpn[1], K.image_dim_ordering(), True,
                            0.7, 300)
        X2, Y1, Y2, IouS = calc_iou(result, img_data, map1)

        # np.where returns a tuple, so take the index array itself. The last
        # column of Y1 marks background: 0 = positive ROI, 1 = negative ROI.
        pos_samples = np.where(Y1[0, :, -1] == 0)[0]
        neg_samples = np.where(Y1[0, :, -1] == 1)[0]

        if num_rois > 1:
            if len(pos_samples) < num_rois // 2:
                selected_pos_samples = pos_samples.tolist()
            else:
                selected_pos_samples = np.random.choice(
                    pos_samples, num_rois // 2, replace=False).tolist()
            try:
                selected_neg_samples = np.random.choice(
                    neg_samples,
                    num_rois - len(selected_pos_samples),
                    replace=False).tolist()
            except ValueError:
                # not enough distinct negatives: sample with replacement
                selected_neg_samples = np.random.choice(
                    neg_samples,
                    num_rois - len(selected_pos_samples),
                    replace=True).tolist()

            sel_samples = selected_pos_samples + selected_neg_samples
        else:
            if np.random.randint(0, 2) == 0:
                sel_samples = choice(pos_samples)
            else:
                sel_samples = choice(neg_samples)

        model_classifier.train_on_batch(
            [X, X2[:, sel_samples, :]],
            [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

        # Evaluation on the validation dataset

        X, Y, img_data = next(data_gen_val)
        P_rpn = model_rpn.predict_on_batch(X)
        result = rpn_to_roi(P_rpn[0], P_rpn[1], K.image_dim_ordering(), True,
                            0.7, 300)
        X2, Y1, Y2, IouS = calc_iou(result, img_data, map1)

        # same foreground/background bookkeeping as on the training batch
        pos_samples = np.where(Y1[0, :, -1] == 0)[0]
        neg_samples = np.where(Y1[0, :, -1] == 1)[0]

        if num_rois > 1:
            if len(pos_samples) < num_rois // 2:
                selected_pos_samples = pos_samples.tolist()
            else:
                selected_pos_samples = np.random.choice(
                    pos_samples, num_rois // 2, replace=False).tolist()
            try:
                selected_neg_samples = np.random.choice(
                    neg_samples,
                    num_rois - len(selected_pos_samples),
                    replace=False).tolist()
            except ValueError:
                # not enough distinct negatives: sample with replacement
                selected_neg_samples = np.random.choice(
                    neg_samples,
                    num_rois - len(selected_pos_samples),
                    replace=True).tolist()

            sel_samples = selected_pos_samples + selected_neg_samples
        else:
            if np.random.randint(0, 2):
                sel_samples = choice(neg_samples)
            else:
                sel_samples = choice(pos_samples)

        loss_class = model_classifier.test_on_batch(
            [X, X2[:, sel_samples, :]],
            [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

        losses[iteration % epoch_length, 2] = loss_class[1]
        losses[iteration % epoch_length, 3] = loss_class[2]
        losses[iteration % epoch_length, 4] = loss_class[3]

        # report and checkpoint once every epoch_length iterations
        if (iteration + 1) % epoch_length == 0:
            rpn_cls_loss = np.mean(losses[:, 0])
            rpn_regr_loss = np.mean(losses[:, 1])
            class_cls_loss = np.mean(losses[:, 2])
            class_regr_loss = np.mean(losses[:, 3])
            class_acc = np.mean(losses[:, 4])

            print("Classifier accuracy for bounding boxes from RPN:",
                  class_acc)
            print("Loss RPN classifier:", rpn_cls_loss)
            print("Loss RPN regression:", rpn_regr_loss)
            print("Loss Detector classifier:", class_cls_loss)
            print("Loss Detector regression:", class_regr_loss)

            total_loss = rpn_cls_loss + rpn_regr_loss + class_cls_loss + class_regr_loss

            if total_loss < best_loss:
                best_loss = total_loss
                model_all.save_weights(model_path)

        iteration += 1
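
Note that train() above never updates the RPN weights, even though model_rpn is compiled with its losses. If joint training is wanted, the RPN step from Code Example #2 would slot in right after drawing each batch. A minimal sketch of that addition, under the assumption that the generators yield RPN targets as Y (as they do in Code Example #2):

# inside the loop, before predict_on_batch:
X, Y, img_data = next(data_gen_train)
loss_rpn = model_rpn.train_on_batch(X, Y)  # update the RPN weights
P_rpn = model_rpn.predict_on_batch(X)      # then regenerate proposals
# the RPN losses could then fill the unused columns of `losses`:
# losses[iteration % epoch_length, 0] = loss_rpn[1]  # rpn_cls loss
# losses[iteration % epoch_length, 1] = loss_rpn[2]  # rpn_regr loss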