# -- fragment: decode the class-specific regression for one RoI, then draw
# the NMS-filtered pedestrian detections --
try:
    x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
except Exception:
    pass
bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y,
                         C.rpn_stride * (x + w), C.rpn_stride * (y + h)])
probs[cls_name].append(np.max(P_cls[0, ii, :]))  # scores are needed for NMS below

all_dets = []

for key in bboxes:
    if key == 'Pedestrian':
        bbox = np.array(bboxes[key])
        new_boxes, new_probs = roi_helpers.non_max_suppression_fast(
            bbox, np.array(probs[key]), overlap_thresh=0.2)
        for jk in range(new_boxes.shape[0]):
            (x1, y1, x2, y2) = new_boxes[jk, :]
            (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(
                ratio, x1, y1, x2, y2)

            cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2),
                          (int(class_to_color[key][0]),
                           int(class_to_color[key][1]),
                           int(class_to_color[key][2])), 4)

            textLabel = '{}: {}'.format(key, int(100 * new_probs[jk]))
            all_dets.append((key, 100 * new_probs[jk]))

            (retval, baseLine) = cv2.getTextSize(textLabel,
                                                 cv2.FONT_HERSHEY_COMPLEX, 1, 1)
            textOrg = (real_x1, real_y1)

            # Black border, then white fill, for the label background.
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5,
                           textOrg[1] - retval[1] - 5), (0, 0, 0), 1)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5,
                           textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
            cv2.putText(img, textLabel, textOrg,
                        cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
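# The get_real_coordinates helper used above is not defined in this section.
# It maps box coordinates from the resized network input back to the original
# image. A minimal sketch, assuming `ratio` is the resize factor returned by
# format_img (resized size / original size):
def get_real_coordinates(ratio, x1, y1, x2, y2):
    real_x1 = int(round(x1 // ratio))
    real_y1 = int(round(y1 // ratio))
    real_x2 = int(round(x2 // ratio))
    real_y2 = int(round(y2 // ratio))
    return (real_x1, real_y1, real_x2, real_y2)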
def predict_single_image(img_path, model_rpn, model_classifier_only,
                         class_mapping):
    img = cv2.imread(img_path)
    if img is None:
        print('reading image failed: {}'.format(img_path))
        return

    X, ratio = format_img(img)
    if K.image_dim_ordering() == 'tf':
        X = np.transpose(X, (0, 2, 3, 1))

    # Get the feature maps and output from the RPN.
    [Y1, Y2, F] = model_rpn.predict(X)
    result = rpn_to_roi(Y1, Y2, K.image_dim_ordering(), overlap_thresh=0.7)

    # Convert from (x1, y1, x2, y2) to (x, y, w, h).
    result[:, 2] -= result[:, 0]
    result[:, 3] -= result[:, 1]

    bbox_threshold = 0.7

    # Apply the spatial pyramid pooling to the proposed regions,
    # 32 RoIs per classifier batch (hard-coded num_rois).
    boxes = dict()
    for jk in range(result.shape[0] // 32 + 1):
        rois = np.expand_dims(result[32 * jk:32 * (jk + 1), :], axis=0)
        if rois.shape[1] == 0:
            break
        if jk == result.shape[0] // 32:
            # Pad the last, partially filled batch of RoIs.
            curr_shape = rois.shape
            target_shape = (curr_shape[0], 32, curr_shape[2])
            rois_padded = np.zeros(target_shape).astype(rois.dtype)
            rois_padded[:, :curr_shape[1], :] = rois
            rois_padded[0, curr_shape[1]:, :] = rois[0, 0, :]
            rois = rois_padded

        [p_cls, p_regr] = model_classifier_only.predict([F, rois])

        for ii in range(p_cls.shape[1]):
            # Skip weak detections and the background class (last index).
            if np.max(p_cls[0, ii, :]) < bbox_threshold or \
                    np.argmax(p_cls[0, ii, :]) == (p_cls.shape[2] - 1):
                continue

            cls_num = np.argmax(p_cls[0, ii, :])
            if cls_num not in boxes:
                boxes[cls_num] = []

            (x, y, w, h) = rois[0, ii, :]
            try:
                (tx, ty, tw, th) = p_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                # Undo the regression target scaling (classifier_regr_std).
                tx /= 8.0
                ty /= 8.0
                tw /= 4.0
                th /= 4.0
                x, y, w, h = apply_regr(x, y, w, h, tx, ty, tw, th)
            except Exception as e:
                print(e)
            # 16 = feature-map stride (hard-coded rpn_stride).
            boxes[cls_num].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h),
                                   np.max(p_cls[0, ii, :])])

    # Apply NMS per class to reduce the many overlapping boxes.
    for cls_num, box in boxes.items():
        boxes_nms = non_max_suppression_fast(box, overlap_thresh=0.5)
        boxes[cls_num] = boxes_nms
        for b in boxes_nms:
            b[0], b[1], b[2], b[3] = get_real_coordinates(
                ratio, b[0], b[1], b[2], b[3])
            # `f` is assumed to be a results CSV file opened at module scope.
            f.write(",".join([img_path.split("/")[-1].split(".")[0],
                              class_mapping[cls_num], str(b[-1]),
                              str(b[0]), str(b[1]), str(b[2]), str(b[3])])
                    + "\n")

    img = draw_boxes_and_label_on_image_cv2(img, class_mapping, boxes)
    result_path = './resnet_aug_results_images/{}.jpg'.format(
        os.path.basename(img_path).split('.')[0])
    cv2.imwrite(result_path, img)
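# apply_regr, called in both functions above, is defined elsewhere in the
# project. It decodes the class-specific regression targets (tx, ty, tw, th)
# into a refined (x, y, w, h) box. A minimal sketch of the usual Faster R-CNN
# decoding, assuming (x, y) is the box's top-left corner on the feature map:
import math

def apply_regr(x, y, w, h, tx, ty, tw, th):
    try:
        # Shift the box center by (tx * w, ty * h) ...
        cx = x + w / 2.0
        cy = y + h / 2.0
        cx1 = tx * w + cx
        cy1 = ty * h + cy
        # ... and rescale width/height exponentially.
        w1 = math.exp(tw) * w
        h1 = math.exp(th) * h
        x1 = cx1 - w1 / 2.0
        y1 = cy1 - h1 / 2.0
        return int(round(x1)), int(round(y1)), int(round(w1)), int(round(h1))
    except (ValueError, OverflowError):
        # Fall back to the unrefined box on numerical problems.
        return x, y, w, h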
def detect_predict(pic, C, model_rpn, model_classifier, model_classifier_only,
                   class_mapping, class_to_color, print_dets=False):
    """
    Detect and predict objects in the picture.

    :param pic: picture as a numpy array
    :param C: config object
    :param model_*: models from the get_models function
    :param class_*: mapping and colors, which need to be loaded so the
        same colors/classes are kept across calls
    :return: picture with bounding boxes drawn
    """
    bbox_threshold = 0.7   # minimum class confidence to keep a RoI
    overlap_thresh = 0.5   # NMS IoU threshold (assumed value; tune as needed)

    img = pic
    X, ratio = format_img(img, C)

    # Rebuild a displayable BGR image from the network input (kept for
    # optional visual debugging; not used further below).
    img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy()
    img_scaled[:, :, 0] += 123.68
    img_scaled[:, :, 1] += 116.779
    img_scaled[:, :, 2] += 103.939
    img_scaled = img_scaled.astype(np.uint8)

    if K.image_dim_ordering() == 'tf':
        X = np.transpose(X, (0, 2, 3, 1))

    # Get the feature maps and output from the RPN.
    [Y1, Y2, F] = model_rpn.predict(X)

    R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(),
                               overlap_thresh=0.7)

    # Convert from (x1, y1, x2, y2) to (x, y, w, h).
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]

    # Apply the spatial pyramid pooling to the proposed regions.
    bboxes = {}
    probs = {}

    for jk in range(R.shape[0] // C.num_rois + 1):
        ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :],
                              axis=0)
        if ROIs.shape[1] == 0:
            break
        if jk == R.shape[0] // C.num_rois:
            # Pad the last, partially filled batch of RoIs.
            curr_shape = ROIs.shape
            target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
            ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
            ROIs_padded[:, :curr_shape[1], :] = ROIs
            ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
            ROIs = ROIs_padded

        [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])

        for ii in range(P_cls.shape[1]):
            # Skip weak detections and the background class (last index).
            if np.max(P_cls[0, ii, :]) < bbox_threshold or \
                    np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                continue

            cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
            if cls_name not in bboxes:
                bboxes[cls_name] = []
                probs[cls_name] = []

            (x, y, w, h) = ROIs[0, ii, :]
            cls_num = np.argmax(P_cls[0, ii, :])
            try:
                (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                tx /= C.classifier_regr_std[0]
                ty /= C.classifier_regr_std[1]
                tw /= C.classifier_regr_std[2]
                th /= C.classifier_regr_std[3]
                x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
            except Exception:
                pass
            bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y,
                                     C.rpn_stride * (x + w),
                                     C.rpn_stride * (y + h)])
            probs[cls_name].append(np.max(P_cls[0, ii, :]))

    all_dets = []

    for key in bboxes:
        bbox = np.array(bboxes[key])
        new_boxes, new_probs = roi_helpers.non_max_suppression_fast(
            bbox, np.array(probs[key]), overlap_thresh=overlap_thresh)
        # Keep only the highest-scoring box for this class.
        jk = np.argmax(new_probs)
        if new_probs[jk] > 0.55:
            (x1, y1, x2, y2) = new_boxes[jk, :]
            (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(
                ratio, x1, y1, x2, y2)

            cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2),
                          (int(class_to_color[key][0]),
                           int(class_to_color[key][1]),
                           int(class_to_color[key][2])), 2)

            textLabel = '{}: {}%'.format(key, int(100 * new_probs[jk]))
            all_dets.append((key, 100 * new_probs[jk]))

            (retval, baseLine) = cv2.getTextSize(textLabel,
                                                 cv2.FONT_HERSHEY_COMPLEX, 1, 1)

            # To avoid putting text outside the frame, move the label below
            # the box when the box touches the top of the image.
            if real_y1 < 20 and real_y2 < img.shape[0]:
                textOrg = (real_x1, real_y2 + 5)
            elif real_y1 < 20 and real_y2 > img.shape[0]:
                textOrg = (real_x1, img.shape[0] - 10)
            else:
                textOrg = (real_x1, real_y1 + 5)

            # Black border, then white fill, for the label background.
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5,
                           textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5,
                           textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
            cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1,
                        (0, 0, 0), 1)

    if print_dets:
        print(all_dets)
    return img
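# format_img, used by both prediction paths, is also defined outside this
# section. The sketch below reconstructs the preprocessing implied by the
# code above (detect_predict adds the channel means 123.68/116.779/103.939
# back onto the reversed channels): resize so the shorter side equals
# C.im_size (an assumed config field), reorder BGR -> RGB, subtract the
# per-channel means, and return a (1, C, H, W) blob plus the resize ratio.
# Note the exact signature may differ per script (predict_single_image
# calls it without C).
def format_img(img, C):
    img_min_side = float(C.im_size)
    (height, width, _) = img.shape

    # Scale so the shorter side equals C.im_size, keeping the aspect ratio.
    if width <= height:
        ratio = img_min_side / width
        new_width = int(img_min_side)
        new_height = int(ratio * height)
    else:
        ratio = img_min_side / height
        new_width = int(ratio * width)
        new_height = int(img_min_side)
    img = cv2.resize(img, (new_width, new_height),
                     interpolation=cv2.INTER_CUBIC)

    # BGR -> RGB, subtract channel means, reorder to channels-first.
    img = img[:, :, (2, 1, 0)].astype(np.float32)
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68
    img = np.transpose(img, (2, 0, 1))
    img = np.expand_dims(img, axis=0)
    return img, ratio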