Example No. 1
    def predict(cls, img=None, img_id=None):

        detection_results = predict_image(img, cls.predictor)
        print(detection_results)

        predictions = {"img_id": str(img_id)}

        annotations = []
        for dr in detection_results:
            a = {}
            b = dr.box.tolist()
            a["bbox"] = [
                int(b[0]),
                int(b[1]),
                int(b[2] - b[0]),
                int(b[3] - b[1])
            ]
            category_id = dr.class_id

            a["category_id"] = int(category_id)
            a["category_name"] = cfg.DATA.CLASS_NAMES[int(category_id)]
            rle = cls.binary_mask_to_rle(dr.mask)
            a["segmentation"] = rle
            annotations.append(a)

        predictions["annotations"] = annotations
        print(predictions)

        return predictions
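
A note on the helper above: cls.binary_mask_to_rle is not shown in this example. A minimal sketch of what such a helper could look like, assuming the detection mask is a 2-D 0/1 NumPy array and that uncompressed COCO RLE (column-major run counts) is the desired format:

from itertools import groupby


def binary_mask_to_rle(binary_mask):
    # Encode a 2-D 0/1 mask as uncompressed COCO RLE.
    # COCO RLE scans the mask in Fortran (column-major) order, and the first
    # count always describes a run of zeros, hence the leading 0 when the
    # mask starts with a foreground pixel.
    rle = {"counts": [], "size": list(binary_mask.shape)}
    for i, (value, run) in enumerate(groupby(binary_mask.ravel(order="F"))):
        if i == 0 and value == 1:
            rle["counts"].append(0)
        rle["counts"].append(len(list(run)))
    return rle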
Example No. 2
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info("Inference output written to output.png")
    tpviz.interactive_imshow(viz)
Example No. 3
def do_predict(pred_func, input_file, output_path=None):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    out_filename = os.path.basename(input_file)
    output_path = os.path.join(output_path, out_filename)
    cv2.imwrite(output_path, viz)
    logger.info("Inference output for {} written to {}".format(input_file, output_path))
Example No. 4
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
Example No. 5
def do_predict(predictor, input_file):
    img = cv2.imread(os.path.join('test_images', input_file), cv2.IMREAD_COLOR)
    results = predict_image(img, predictor)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)  # TODO: remove the concatenation
    opp = cv2.imwrite(os.path.join('test_inferences', input_file.split('.')[0]+".png"), viz)
    if opp:
        logger.info("Inference output for {} Successful".format(input_file))
Example No. 6
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)

    # draw a green rectangle around the original picture that contains the failure
    height, width, channels = img.shape
    cv2.rectangle(img, (0, 0), (width, height),
                  color=(100, 220, 80), thickness=5)

    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite(result_folder_path+"/0.png", viz)
    logger.info("Inference output written to 0.png")
Example No. 7
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    start = time.time()
    results = predict_image(img, pred_func)
    end = time.time()
    print('cost time: {:.3f} s'.format(end - start))
    #if cfg.MODE_MASK:
    #    final = draw_final_outputs_blackwhite(img, results)
    #else:
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("{}.png".format(input_file[-15:-4]), viz)
    logger.info("Inference output for {} written to {}.png".format(
        input_file, input_file[-15:-4]))
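
The timing above measures a single call, which also includes any one-time setup cost on the first invocation. A hedged sketch of a small helper (hypothetical, not part of the original code) that warms up once and averages over several runs:

import time


def timed_predict(img, pred_func, n_runs=10):
    # Hypothetical helper: warm up once, then report the mean latency of
    # predict_image over n_runs calls on the same image.
    predict_image(img, pred_func)  # warm-up; the first call can be much slower
    start = time.time()
    for _ in range(n_runs):
        results = predict_image(img, pred_func)
    print('mean inference time: {:.3f} s'.format((time.time() - start) / n_runs))
    return results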
Example No. 8
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)  # get error from this

    img_name = ntpath.basename(input_file)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)

    if not os.path.exists(result_folder):
        os.makedirs(result_folder)

    cv2.imwrite(result_folder + img_name, viz)
    logger.info("Inference output for {} written to {}".format(
        input_file, result_folder))
Example No. 9
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    # start_time = time.time()
    results = predict_image(img, pred_func)
    # end_time = time.time()
    # print(f"--------- Inference time : {end_time - start_time}seconds -----------------")
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
    tpviz.interactive_imshow(viz)
Example No. 10
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)  # get error from this

    img_name = ntpath.basename(input_file)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    save_path = "/home/jetson/Documents/result/" + model_num + "/"

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    cv2.imwrite(save_path + img_name, viz)
    logger.info("Inference output for {} written to {}".format(
        input_file, save_path))
Example No. 11
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    # while True:
    print('Starting prediction')
    start_time = time.time()
    results = predict_image(img, pred_func)
    print(time.time() - start_time)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
Example No. 12
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)

    # draw a green rectangle around the original picture that contains the failure
    height, width, channels = img.shape
    cv2.rectangle(img, (0, 0), (width, height),
                  color=(100, 220, 80),
                  thickness=5)

    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite(
        "/home/jetson/tensorpack/examples/FasterRCNN/static/images/output.png",
        viz)
    logger.info("Inference output written to output.png")
Example No. 13
def do_predict(pred_func, input_file):
    '''
    Read the image at input_file, then call pred_func to obtain boxes, scores and labels.
    The result is written to output.png and shown interactively.

    :param pred_func: offline predictor returning boxes, labels and scores
    :param input_file: path to the input image
    :return:
    '''
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info("Inference output for {} written to output.png".format(input_file))
    tpviz.interactive_imshow(viz)
Example No. 14
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
    evaluator = COCOEvaluator(
        root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
    )
    category_id_to_coco_id = {
        v: k for k, v in COCODetection.COCO_id_to_category_id.items()
    }

    cfg.update_config_from_args(cfg_list)  # TODO backup/restore config
    finalize_configs(False)
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(model_file),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1],
    )
    predictor = OfflinePredictor(predcfg)

    def xyxy_to_xywh(box):
        box[2] -= box[0]
        box[3] -= box[1]
        return box

    df = get_eval_dataflow("coco_val2017")
    df.reset_state()
    for img, img_id in tqdm.tqdm(df, total=len(df)):
        results = predict_image(img, predictor)
        res = [
            {
                "image_id": img_id,
                "category_id": category_id_to_coco_id.get(
                    int(r.class_id), int(r.class_id)
                ),
                "bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
                "score": round(float(r.score), 3),
            }
            for r in results
        ]
        evaluator.add(res)
        if evaluator.cache_exists:
            break

    evaluator.save()
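
A call to evaluate_rcnn might look like the following. The model name, arXiv id, config overrides, and checkpoint path are illustrative placeholders; the overrides follow the "KEY=value" convention consumed by cfg.update_config_from_args:

evaluate_rcnn(
    model_name="Mask R-CNN (ResNet-50-FPN)",        # placeholder display name
    paper_arxiv_id="1703.06870",                    # Mask R-CNN paper
    cfg_list=["MODE_MASK=True", "MODE_FPN=True"],   # hypothetical overrides
    model_file="/path/to/checkpoint.npz",           # placeholder checkpoint path
)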
Example No. 15
def do_predict(pred_func, input_file):
    img = imread(input_file)
    if (len(img.shape) == 2):
        img = np.expand_dims(img, axis=2)
        img = np.repeat(img, 3, axis=2)

    img = scipy.misc.imresize(img, (800, 800))

    results = predict_image(img, pred_func)
    print(len(results))
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    # for result in results:
    #     if result.class_id != 3:
    #         print(result)
    imsave('./output2/outputimg.tif', final)
    logger.info(
        "Inference output for {} written to outputimg.tif".format(input_file))
    return results
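
Note that scipy.misc.imresize was removed in SciPy 1.3, so this example only runs against older SciPy releases. A drop-in sketch using OpenCV instead, assuming img is an 8-bit H x W x 3 array:

import cv2

# Roughly equivalent to scipy.misc.imresize(img, (800, 800)) for uint8 images;
# note that cv2.resize takes the target size as (width, height).
img = cv2.resize(img, (800, 800), interpolation=cv2.INTER_LINEAR)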
Example No. 16
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    '''fxl
    # modify file name
    cv2.imwrite("output.png", viz)
    logger.info("Inference output for {} written to output.png".format(input_file))
    '''
    base_name = os.path.basename(input_file)
    img_name = os.path.splitext(base_name)[0]
    result_file = "/content/drive/My Drive/Colab/tp/myoutput/predict_output/" + img_name + ".png"
    cv2.imwrite(result_file, viz)
    print(img_number, end='')
    print("." +
          "Inference for {} written to {}".format(base_name, result_file))
Example No. 17
def _predict_with_gt(pred_func,
                     input_file,
                     ground_truths,
                     output_dir=None,
                     font_rs=10,
                     thickness_rs=10):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)

    # resized_img, orig_shape, scale = run_resize_image(img)
    #  TODO: predict_image already contains resize
    results = predict_image(img, pred_func)
    results = list(filter(lambda x: x.score > 0.7, results))
    font_scale = np.sqrt(min(img.shape[:2])) / font_rs
    thickness = thickness_rs
    print('font_scale:', font_scale)

    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img,
                                              results,
                                              font_scale=font_scale,
                                              thickness=thickness)
    else:
        final = draw_final_outputs(img,
                                   results,
                                   font_scale=font_scale,
                                   thickness=thickness)

    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    image_with_gt = draw_final_outputs(img,
                                       ground_truths,
                                       font_scale=font_scale,
                                       thickness=thickness)
    viz = np.concatenate((image_with_gt, final), axis=1)
    out_path = os.path.join(output_dir,
                            re.sub('/', '-', input_file) + '.out.png')
    cv2.imwrite(out_path, viz)
    logger.info("Inference output for {} written to\n {}".format(
        input_file, out_path))
Example No. 18
    if args.visualize:
        do_visualize(MODEL, args.load)
    else:
        predcfg = PredictConfig(
            model=MODEL,
            session_init=get_model_loader(args.load),
            input_names=MODEL.get_inference_tensor_names()[0],
            output_names=MODEL.get_inference_tensor_names()[1])

        if args.compact:
            ModelExporter(predcfg).export_compact(args.compact, optimize=False)
        elif args.serving:
            ModelExporter(predcfg).export_serving(args.serving, optimize=False)

        if args.predict:
            predictor = OfflinePredictor(predcfg)
            for image_file in args.predict:
                do_predict(predictor, image_file)
        elif args.evaluate:
            assert args.evaluate.endswith('.json'), args.evaluate
            do_evaluate(predcfg, args.evaluate)
        elif args.benchmark:
            df = get_eval_dataflow(cfg.DATA.VAL[0])
            df.reset_state()
            predictor = OfflinePredictor(predcfg)
            for img in tqdm.tqdm(df, total=len(df)):
                # This include post-processing time, which is done on CPU and not optimized
                # To exclude it, modify `predict_image`.
                predict_image(img[0], predictor)