Code example #1
def face_bb(test_img):
    img_height = 512
    img_width = 512
    img_channels = 3
    _CONF = 0.60
    _IOU = 0.15
    coords = 'centroids' # Whether the box coordinates to be used as targets for the model should be in the 'centroids' or 'minmax' format, see documentation
    normalize_coords = True
    org_height = test_img.shape[0]
    org_width = test_img.shape[1]
    test_img = cv2.resize(test_img, (512, 512))
    test_img_input = np.expand_dims(test_img,axis=0)
    y_pred = model.predict(test_img_input)
    y_pred_decoded = decode_y2(y_pred,
                               confidence_thresh=_CONF,
                               iou_threshold=_IOU,
                               top_k='all',
                               input_coords=coords,
                               normalize_coords=normalize_coords,
                               img_height=img_height,
                               img_width=img_width)
    result = y_pred_decoded[0][0]
    det_label = result[0]
    det_conf = result[1]
    det_xmin = result[2]
    det_xmax = result[3]
    det_ymin = result[4]
    det_ymax = result[5]
    # Convert to integers, since image array indices must be integers
    bb = [int(det_xmin),int(det_xmax),int(det_ymin),int(det_ymax)]
    return bb
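
A note on usage: face_bb passes the resized 512x512 dimensions to decode_y2 (org_height and org_width are computed but never used), so the returned box is expressed in the resized frame. A minimal usage sketch, assuming model and decode_y2 are already in scope and using a hypothetical input path, that scales the box back to the original image before cropping:

import cv2

# Hypothetical usage of face_bb: the returned [xmin, xmax, ymin, ymax] box lives
# in the 512x512 resized frame, so scale it back to the original image first.
image = cv2.imread("face.jpg")  # hypothetical input path
xmin, xmax, ymin, ymax = face_bb(image)
sx = image.shape[1] / 512.0  # width scale factor back to the original image
sy = image.shape[0] / 512.0  # height scale factor back to the original image
face_crop = image[int(ymin * sy):int(ymax * sy), int(xmin * sx):int(xmax * sx)]
cv2.imwrite("face_crop.jpg", face_crop)
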
Code example #2
def plot(path):
    X = cv2.imread(path)
    newimg = cv2.resize(X, (300, 300), interpolation=cv2.INTER_CUBIC)
    bg_img = np.zeros((300, 480, 3), dtype=np.uint8)
    bg_img[:, 90:390] = newimg
    for k in range(90):
        bg_img[:, k] = newimg[:, 0]
    for k in range(90):
        bg_img[:, 390 + k] = newimg[:, -1]
    bg_img = bg_img.reshape((1, ) + bg_img.shape)
    # print bg_img.shape
    y_pred = model.predict(bg_img)
    # print y_pred
    y_pred_decoded = decode_y2(y_pred,
                               confidence_thresh=0.1,
                               iou_threshold=0.1,
                               top_k='all',
                               input_coords='centroids',
                               normalize_coords=False,
                               img_height=None,
                               img_width=None)
    #print y_pred_decoded
    plt.figure(figsize=(20, 12))
    plt.imshow(bg_img[0])

    current_axis = plt.gca()

    #classes = ['background', 'car', 'truck', 'pedestrian', 'bicyclist', 'light'] # Just so we can print class names onto the image instead of IDs
    classes = ['background', 'card']  # Just so we can print class names onto the image instead of IDs

    # Draw the predicted boxes in blue
    for box in y_pred_decoded[0]:
        label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
        current_axis.add_patch(
            plt.Rectangle((box[2], box[4]),
                          box[3] - box[2],
                          box[5] - box[4],
                          color='blue',
                          fill=False,
                          linewidth=2))
        current_axis.text(box[2],
                          box[4],
                          label,
                          size='x-large',
                          color='white',
                          bbox={
                              'facecolor': 'blue',
                              'alpha': 1.0
                          })
    plt.show()
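
The two loops above pad the 300x300 resize out to 300x480 by replicating the leftmost and rightmost columns. If edge replication is all that is needed, cv2.copyMakeBorder with BORDER_REPLICATE produces the same array in one call; a minimal equivalent sketch (the input path is hypothetical):

import cv2

# Equivalent edge-replication padding in a single call: 90 replicated columns on
# the left and right of the 300x300 resize give the same 300x480x3 array as the
# manual loops above.
path = "example.jpg"  # hypothetical input path
newimg = cv2.resize(cv2.imread(path), (300, 300), interpolation=cv2.INTER_CUBIC)
bg_img = cv2.copyMakeBorder(newimg, 0, 0, 90, 90, cv2.BORDER_REPLICATE)
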
Code example #3
def predict(model, dg):
    for X, filenames in dg:
        Y_pred = model.predict(X)
        img_height, img_width = X[0].shape[:-1]
        Y_pred_decoded = decode_y2(Y_pred,
                                   confidence_thresh=0.5,
                                   iou_threshold=0.4,
                                   top_k='all',
                                   input_coords='centroids',
                                   normalize_coords=True,
                                   img_height=img_height,
                                   img_width=img_width)
        for idx, y_pred_decoded in enumerate(Y_pred_decoded):
            for bbox in y_pred_decoded:
                yield [filenames[idx]] + bbox.tolist()
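
The generator above yields one row per detected box. A hypothetical consumer, assuming dg is a finite generator of (image_batch, filenames) pairs and that decoded rows follow the [class_id, confidence, xmin, xmax, ymin, ymax] layout printed in code examples #4 and #5 below, could write all detections to a CSV file:

import csv

# Collect every detection produced by the predict() generator into a CSV file.
with open("detections.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["filename", "class_id", "confidence",
                     "xmin", "xmax", "ymin", "ymax"])
    for row in predict(model, dg):
        writer.writerow(row)
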
Code example #4
File: trainbosch.py Project: frk2/ssd_keras
def predict(model, X):
    y_pred = model.predict(X)
    print(y_pred)
    y_pred_decoded = decode_y2(y_pred,
                               confidence_thresh=0.2,
                               iou_threshold=0.4,
                               top_k='all',
                               input_coords='centroids',
                               normalize_coords=False,
                               img_height=None,
                               img_width=None)

    print(
        "Decoded predictions (output format is [class_id, confidence, xmin, xmax, ymin, ymax]):\n"
    )
    print(y_pred_decoded)
    return y_pred_decoded
Code example #5
File: test3_eval.py Project: dhmodi/imagedetection
print("Image:", filenames[i])
print()
print("Ground truth boxes:\n")
print(y_true[i])

# 3: Make a prediction
y_pred = model.predict(X)

# 4: Decode the raw prediction `y_pred`

y_pred_decoded = decode_y2(y_pred,
                           confidence_thresh=0.5,
                           iou_threshold=0.4,
                           top_k='all',
                           input_coords='centroids',
                           normalize_coords=False,
                           img_height=None,
                           img_width=None)

print(
    "Decoded predictions (output format is [class_id, confidence, xmin, xmax, ymin, ymax]):\n"
)
print(y_pred_decoded[i])

# 5: Draw the predicted boxes onto the image

plt.figure(figsize=(20, 12))
plt.imshow(X[i])

current_axis = plt.gca()
Code example #6
        # TODO: double check model input channel format, BGR or RGB
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        resized_rgb_frame = cv2.resize(rgb_frame,
                                       (MODEL_INPUT_WIDTH, MODEL_INPUT_HEIGHT))
        X = np.array([resized_rgb_frame])
        start_time = time.time()
        y_pred = model.predict(X)
        interval = time.time() - start_time
        print("prediction time:", interval)

        # Decode the raw prediction `y_pred`
        y_pred_decoded = decode_y2(y_pred,
                                   confidence_thresh=0.7,
                                   iou_threshold=0.4,
                                   top_k='all',
                                   input_coords='centroids',
                                   normalize_coords=True,
                                   img_height=INPUT_CAM_HEIGHT,
                                   img_width=INPUT_CAM_WIDTH)

        np.set_printoptions(precision=2, suppress=True, linewidth=90)
        #print("Decoded predictions (output format is [class_id, confidence, xmin, ymin, xmax, ymax]):\n")
        #print(y_pred_decoded)

        frame = plot_bboxes(frame, y_pred_decoded[0])

        cv2.imshow('Video', frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    i = 0  # Which batch item to look at

    print("Image:", filenames[i])
    print()
    print("Ground truth boxes:\n")
    print(y_true[i])
    # 3: Make a prediction

    y_pred = model.predict(X)
    # 4: Decode the raw prediction `y_pred`

    y_pred_decoded = decode_y2(y_pred,
                               confidence_thresh=0.7,
                               iou_threshold=0.2,
                               top_k='all',
                               input_coords='centroids',
                               normalize_coords=normalize_coords,
                               img_height=img_height,
                               img_width=img_width)

    np.set_printoptions(precision=2, suppress=True, linewidth=90)
    print("Predicted boxes:\n")
    print(y_pred_decoded[i])
    # 5: Draw the predicted boxes onto the image

    plt.figure(figsize=(20, 12))
    plt.imshow(X[i])

    current_axis = plt.gca()

    for box in y_true[i]:
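
plot_bboxes, called on the decoded predictions in the video loop above, is not defined in this snippet. Purely as an illustration, a minimal OpenCV-based stand-in might look like the following; it assumes the box layout noted in the snippet's commented-out print, [class_id, confidence, xmin, ymin, xmax, ymax], and takes a hypothetical class-name list.

import cv2

def plot_bboxes(frame, boxes, class_names=('background', 'object')):
    """Hypothetical stand-in for the plot_bboxes helper used above.

    Assumes each box is [class_id, confidence, xmin, ymin, xmax, ymax] and that
    class_names is a placeholder list of labels.
    """
    for class_id, conf, xmin, ymin, xmax, ymax in boxes:
        label = "{}: {:.2f}".format(class_names[int(class_id)], conf)
        cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)),
                      (255, 0, 0), 2)
        cv2.putText(frame, label, (int(xmin), max(int(ymin) - 5, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
    return frame
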
Code example #8

_CONF = 0.01
_IOU = 0.15


for i in range(n_test_samples/batch_size):
  X, y, filenames = next(test_generator)

  y_pred = model.predict(X)


  y_pred_decoded = decode_y2(y_pred,
                             confidence_thresh=_CONF,
                             iou_threshold=_IOU,
                             top_k='all',
                             input_coords=coords,
                             normalize_coords=normalize_coords,
                             img_height=img_height,
                             img_width=img_width)


  np.set_printoptions(suppress=True)


  for i in range(batch_size):
    print colored("image %d :" %i, "cyan")
    print colored("predicted", "green")
    print y_pred_decoded[i]
    print colored("ground truth", "red")
    print y[i]
Code example #9
print(colored("now predicting...", "yellow"))

_CONF = 0.01
_IOU = 0.15

for i in range(n_test_samples // batch_size):
    X, y, filenames = next(test_generator)

    y_pred = model.predict(X)

    y_pred_decoded = decode_y2(
        y_pred,
        confidence_thresh=_CONF,
        iou_threshold=_IOU,
        top_k="all",
        input_coords=coords,
        normalize_coords=normalize_coords,
        img_height=img_height,
        img_width=img_width,
    )

    np.set_printoptions(suppress=True)

    for j in range(batch_size):
        print(colored("image %d :" % j, "cyan"))
        print(colored("predicted", "green"))
        print(y_pred_decoded[j])
        print(colored("ground truth", "red"))
        print(y[j])

        # Save the decoded boxes for each image in the batch
        save_bb("./output_test/", filenames[j], y_pred_decoded[j])
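
save_bb is likewise not shown in the snippet. As an assumption-only sketch (its signature is inferred from the call above), a stand-in that writes one text file of decoded boxes per image could look like this:

import os

import numpy as np

# Hypothetical stand-in for save_bb: dump the decoded boxes for one image to a
# text file named after the image, one [class_id, confidence, xmin, xmax, ymin,
# ymax] row per box.
def save_bb(out_dir, filename, boxes):
    os.makedirs(out_dir, exist_ok=True)
    out_name = os.path.splitext(os.path.basename(filename))[0] + ".txt"
    np.savetxt(os.path.join(out_dir, out_name), np.asarray(boxes), fmt="%.2f")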