Example #1
def do_predict_pb(sess, input_tensor, output_tensors, input_file, output_file, drawcontour=True):
    print('input fn: ', input_file)
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image_pb(sess, input_tensor, output_tensors, img)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
        
    if drawcontour:
        if results:
            binary = results[0].mask * 255
            # expand the single-channel mask to 3 channels so it stacks next to the color images
            binary = np.repeat(binary[:, :, np.newaxis], 3, axis=-1)
            viz = np.concatenate((img, final, binary), axis=1)
        else:
            viz = img
    else:
        viz = final
    cv2.imwrite(output_file, viz)
    logger.info("Inference output for {} written to output.png".format(output_file))
Example #2
File: train.py  Project: hakillha/maria03
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    # tpviz.interactive_imshow(viz)
    cv2.imwrite(os.path.basename(input_file), viz)
Example #3
def viz_detection(args, fname, bb_list):
    input_file = os.path.join(args.anno_dir, '..', 'frames',
                              os.path.basename(fname).split('.')[0] + '.jpg')
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    final = draw_final_outputs(img, bb_list, tags_on=False, bb_list_input=True)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite(os.path.basename(input_file), viz)
Example #4
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()  # we don't visualize mask stuff
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=model,
                      session_init=get_model_loader(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_{}_proposals/boxes'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'generate_{}_proposals/probs'.format(
                              'fpn' if cfg.MODE_FPN else 'rpn'),
                          'fastrcnn_all_probs',
                          'final_boxes',
                          'final_probs',
                          'final_labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df.get_data()),
                                        nr_visualize):
            img = dp[0]
            if cfg.MODE_MASK:
                gt_boxes, gt_labels, gt_masks = dp[-3:]
            else:
                gt_boxes, gt_labels = dp[-2:]

            rpn_boxes, rpn_scores, all_probs, \
                final_boxes, final_probs, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_probs[good_proposals_ind])

            results = [
                DetectionResult(*args)
                for args in zip(final_boxes, final_probs, final_labels,
                                [None] * len(final_labels))
            ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
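A hypothetical driver for visualize(), following the layout of the tensorpack FasterRCNN example this snippet comes from (the model class and checkpoint path are assumptions for illustration, not from the original):

if __name__ == '__main__':
    model = ResNetC4Model()  # assumed model class from the same codebase
    visualize(model, '/path/to/checkpoint', nr_visualize=20, output_dir='viz_out')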
Example #5
File: train.py  Project: aljosaosep/mprcnn
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    print(len(results))
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)
Example #6
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info("Inference output written to output.png")
    tpviz.interactive_imshow(viz)
Example #7
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    # tpviz.interactive_imshow(viz)
    cv2.imwrite('../drive/train_log/predicted_image.jpg', viz)
    cv2.destroyAllWindows()
Example #8
File: train.py  Project: aljosaosep/mprcnn
def visualize(model_path, nr_visualize=50, output_dir='output'):
    df = get_train_dataflow_coco()  # we don't visualize mask stuff
    df.reset_state()

    pred = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_rpn_proposals/boxes',
                          'generate_rpn_proposals/probs',
                          'fastrcnn_all_probs',
                          'final_boxes',
                          'final_probs',
                          'final_labels',
                      ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df.get_data()),
                                        nr_visualize):
            img, _, _, gt_boxes, gt_labels = dp

            rpn_boxes, rpn_scores, all_probs, \
                final_boxes, final_probs, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_probs[good_proposals_ind])

            if config.USE_SECOND_HEAD:
                results = [
                    SecondDetectionResult(*args)
                    for args in zip(final_boxes, final_probs, final_labels,
                                    [None] * len(final_labels))
                ]
            else:
                results = [
                    DetectionResult(*args)
                    for args in zip(final_boxes, final_probs, final_labels,
                                    [None] * len(final_labels))
                ]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
Example #9
def eval_on_dataflow(df, detect_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.
    Return:
        (all_results, mean local score); all_results is kept for the caller's
        convenience but is currently returned empty
    """
    csv_path = os.path.join(config.BASEDIR, 'train_ship_segmentations_v2.csv')
    seg_df = pd.read_csv(csv_path, engine="python")
    seg_df = seg_df.dropna(axis=0)
    seg_df = seg_df.set_index('ImageId')

    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        score = 0.0
        all_score = []
        count = 0.0
        eval_names = []
        eval_imgs = []
        all_det = []
        all_im = []
        for img, img_id in df.get_data():
            results = detect_func(img)
            #mask_whole = detect_func(img)
            #all_det.append(mask_whole)
            all_im.append(img)
            eval_names.append(img_id)
            final = draw_final_outputs(img, results)
            cv2.imwrite('./eval_out_bb/{}'.format(img_id), final)
            mask_instances = [r.mask for r in results]
            score_instances = [r.score for r in results]

            masks = clean_overlap_instance(mask_instances, score_instances,
                                           img_id)
            if len(masks) == 0:
                print("no mask!!", img_id)
                v = 0
            else:
                v = local_eval(masks, img_id, seg_df)  #pred, imgId
            score += v
            all_score.append(v)
            count += 1
            tqdm_bar.update(1)
        for k in np.array(all_score).argsort()[:20]:
            print(all_score[k], eval_names[k])
        #    cv2.imwrite("./eval_out/{}".format(eval_names[k]), all_im[k])
        #    cv2.imwrite("./eval_out/{}_mask.jpg".format(eval_names[k].split(".")[0]), all_det[k]*255)
        print("Local Eval: ", score / count)
    return all_results, score / count
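clean_overlap_instance is not defined in this snippet; a minimal sketch of the behavior its call site implies (resolve overlapping instance masks by score, highest first) might look like the following. This is an assumption for illustration, not the project's actual implementation:

def clean_overlap_instance(masks, scores, img_id=None):
    # Assign each pixel to the highest-scoring instance that covers it.
    if len(masks) == 0:
        return []
    order = np.argsort(scores)[::-1]            # highest score first
    taken = np.zeros(masks[0].shape, dtype=bool)
    cleaned = []
    for i in order:
        m = masks[i].astype(bool) & ~taken      # drop pixels already claimed
        taken |= m
        if m.any():
            cleaned.append(m.astype(np.uint8))
    return cleaned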
Example #10
def do_predict(pred_func, input_file, output_path=None):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    out_filename = input_file.split("/")[-1]
    # the original referenced an undefined `out_path`; use the output_path parameter instead
    if output_path is None:
        output_path = out_filename
    else:
        output_path = os.path.join(output_path, out_filename)
    cv2.imwrite(output_path, viz)
    logger.info("Inference output for {} written to {}".format(input_file, output_path))
Example #11
def predict_many(pred_func, input_files):
    if not os.path.exists('output'):
        os.mkdir('output')
    for idx, input_file in enumerate(input_files):
        img = cv2.imread(input_file, cv2.IMREAD_COLOR)
        results = detect_one_image(img, pred_func)
        final = draw_final_outputs(img, results)
        plt.imshow(final[:, :, ::-1])  # OpenCV images are BGR; flip to RGB for matplotlib
        plt.savefig(os.path.join('output', str(idx) + '.png'))
Example #12
def do_predict(predictor, input_file):
    img = cv2.imread(os.path.join('test_images', input_file), cv2.IMREAD_COLOR)
    results = predict_image(img, predictor)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)  # author's note: remove the concatenation
    opp = cv2.imwrite(os.path.join('test_inferences', input_file.split('.')[0]+".png"), viz)
    if opp:
        logger.info("Inference output for {} Successful".format(input_file))
Example #13
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
Example #14
def visualize(model, model_path, nr_visualize=100, output_dir='output'):
    """
    Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
    """
    df = get_train_dataflow()   # we don't visualize mask stuff
    df.reset_state()

    pred = OfflinePredictor(PredictConfig(
        model=model,
        session_init=get_model_loader(model_path),
        input_names=['image', 'gt_boxes', 'gt_labels'],
        output_names=[
            'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
            'fastrcnn_all_scores',
            'output/boxes',
            'output/scores',
            'output/labels',
        ]))

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df), nr_visualize):
            img = dp[0]
            if cfg.MODE_MASK:
                gt_boxes, gt_labels, gt_masks = dp[-3:]
            else:
                gt_boxes, gt_labels = dp[-2:]

            rpn_boxes, rpn_scores, all_scores, \
                final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)

            # draw groundtruth boxes
            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            # draw best proposals for each groundtruth, to show recall
            proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
            # draw the scores for the above proposals
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])

            results = [DetectionResult(*args) for args in
                       zip(final_boxes, final_scores, final_labels,
                           [None] * len(final_labels))]
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches([
                gt_viz, proposal_viz,
                score_viz, final_viz], 2, 2)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
Example #15
File: train.py  Project: Bonnie970/cedar
def predict_one_image(input_file, pred_func):
    print('Predicting '+input_file)
    path = '/home/bonniehu/projects/def-jjclark/bonniehu/FasterRCNN/pred_out/'
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    trimap_path = 'trimap/'
    trimaps = trimap_outputs(img, results, trimap_path, input_file.split('/')[-1][:-4])
    final = draw_final_outputs(img, results)
    # viz = np.concatenate((img, final), axis=1)
    # tpviz.interactive_imshow(viz)  # interactive display is not supported on this headless Ubuntu setup
    misc.imsave(path+input_file.split('/')[-1][:-4]+'_mask.png', final)
Example #16
def _predict_with_gt(pred_func,
                     input_file,
                     ground_truths,
                     output_dir=None,
                     font_rs=10,
                     thickness_rs=10):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)

    # resized_img, orig_shape, scale = run_resize_image(img)
    #  TODO: predict_image already contains resize
    results = predict_image(img, pred_func)
    results = list(filter(lambda x: x.score > 0.7, results))
    font_scale = np.sqrt(min(img.shape[:2])) / font_rs
    thickness = thickness_rs
    print('font_scale:', font_scale)

    img = cv2.imread(input_file, cv2.IMREAD_COLOR)  # fresh copy so the prediction pass cannot leak drawings into this render
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img,
                                              results,
                                              font_scale=font_scale,
                                              thickness=thickness)
    else:
        final = draw_final_outputs(img,
                                   results,
                                   font_scale=font_scale,
                                   thickness=thickness)

    img = cv2.imread(input_file, cv2.IMREAD_COLOR)  # fresh copy again for the ground-truth overlay
    image_with_gt = draw_final_outputs(img,
                                       ground_truths,
                                       font_scale=font_scale,
                                       thickness=thickness)
    viz = np.concatenate((image_with_gt, final), axis=1)
    out_path = os.path.join(output_dir,
                            re.sub('/', '-', input_file) + '.out.png')
    cv2.imwrite(out_path, viz)
    logger.info("Inference output for {} written to\n {}".format(
        input_file, out_path))
Example #17
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)

    # draw a green rectangle around the original image to mark it as a failure case
    height, width, channels = img.shape
    cv2.rectangle(img, (0, 0), (width, height),
                  color=(100, 220, 80), thickness=5)

    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite(result_folder_path + "/0.png", viz)  # result_folder_path is assumed to be defined at module level
    logger.info("Inference output written to 0.png")
Example #18
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    t = time.time()
    results = detect_one_image(img, pred_func)
    t_final = time.time() - t
    height, width, channels = img.shape
    print("Time: ", t_final, " for ", height * width / 1e6,
          " Mpixels , i.e. Width=", width, " Height=", height, " Channels=",
          channels)
    print("Throughput: ", height * width / 1e6 / t_final, " Mpixels/Sec")

    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)
Example #19
def classifier_eval_output(df, pred_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (fname, image, orig_shape)
        pred_func: a callable, takes (image, orig_shape) and returns (bbs, probs)
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of [fname, bb_list, prob_list] entries, checked to be JSON-serializable
    """
    df.reset_state()
    all_results = []
    jsonable = False
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for fname, img, orig_shape in df.get_data():
            bbs, probs = pred_func(img, orig_shape)

            if VIZ:
                input_file = os.path.join(
                    '/media/yingges/TOSHIBA EXT/datasets/re-ID/PRW-v16.04.20/frames',
                    os.path.basename(fname).split('.')[0] + '.jpg')
                img = cv2.imread(input_file, cv2.IMREAD_COLOR)
                final = draw_final_outputs(img,
                                           bbs,
                                           tags_on=False,
                                           bb_list_input=True)
                viz = np.concatenate((img, final), axis=1)
                cv2.imwrite(os.path.basename(input_file), viz)

            result_list = []
            result_list.append(fname)
            bb_list = []
            prob_list = []
            for bb, prob in zip(bbs, probs):
                bb_list.append(list(map(lambda x: round(float(x), 2), bb)))
                prob_list.append(list(map(lambda x: round(float(x), 4), prob)))
            result_list.append(bb_list)
            result_list.append(prob_list)

            if not jsonable:
                jsonable = jsonable_test(result_list)

            all_results.append(result_list)
            tqdm_bar.update(1)
    return all_results
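jsonable_test is not shown in this snippet; a plausible minimal implementation, offered as an assumption rather than the project's actual helper:

import json

def jsonable_test(obj):
    # True if obj round-trips through json.dumps without error.
    try:
        json.dumps(obj)
        return True
    except (TypeError, ValueError):
        return False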
Example #20
def predict(model_path, input_file):
    pred = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(model_path),
                      input_names=['image'],
                      output_names=[
                          'fastrcnn_fg_probs',
                          'fastrcnn_fg_boxes',
                      ]))
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)
Example #21
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)  # the reported error originates from this call

    img_name = ntpath.basename(input_file)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)

    if not os.path.exists(result_folder):
        os.makedirs(result_folder)

    cv2.imwrite(os.path.join(result_folder, img_name), viz)  # result_folder is assumed to be defined at module level
    logger.info("Inference output for {} written to {}".format(
        input_file, result_folder))
Example #22
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    start = time.time()
    results = predict_image(img, pred_func)
    end = time.time()
    print('Inference time: {:.3f}s'.format(end - start))
    #if cfg.MODE_MASK:
    #    final = draw_final_outputs_blackwhite(img, results)
    #else:
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("{}.png".format(input_file[-15:-4]), viz)
    logger.info("Inference output for {} written to {}.png".format(
        input_file, input_file[-15:-4]))
Example #23
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    print('Starting prediction')
    start_time = time.time()
    results = predict_image(img, pred_func)
    print('Prediction took {:.3f}s'.format(time.time() - start_time))
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
Example #24
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    # start_time = time.time()
    results = predict_image(img, pred_func)
    # end_time = time.time()
    # print(f"--------- Inference time : {end_time - start_time}seconds -----------------")
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info(
        "Inference output for {} written to output.png".format(input_file))
    tpviz.interactive_imshow(viz)
Example #25
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)  # the reported error originates from this call

    img_name = ntpath.basename(input_file)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    save_path = "/home/jetson/Documents/result/" + model_num + "/"

    if not os.path.exists(save_path):
        os.makedirs(save_path)

    cv2.imwrite(save_path + img_name, viz)
    logger.info("Inference output for {} written to {}".format(
        input_file, save_path))
Example #26
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)

    # draw a green rectangle around the original image to mark it as a failure case
    height, width, channels = img.shape
    cv2.rectangle(img, (0, 0), (width, height),
                  color=(100, 220, 80),
                  thickness=5)

    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite(
        "/home/jetson/tensorpack/examples/FasterRCNN/static/images/output.png",
        viz)
    logger.info("Inference output written to output.png")
Example #27
File: train.py  Project: mckjzhangxk/deepAI
def do_predict(pred_func, input_file):
    '''
    Read the image at input_file, run pred_func to obtain boxes, scores and
    labels, write the visualization to output.png, and show it interactively.

    :param pred_func:
    :param input_file:
    :return:
    '''
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    cv2.imwrite("output.png", viz)
    logger.info("Inference output for {} written to output.png".format(input_file))
    tpviz.interactive_imshow(viz)
Example #28
def visualize(model_path, nr_visualize=50, output_dir='output'):
    pred = OfflinePredictor(
        PredictConfig(model=Model(),
                      session_init=get_model_loader(model_path),
                      input_names=['image', 'gt_boxes', 'gt_labels'],
                      output_names=[
                          'generate_rpn_proposals/boxes',
                          'generate_rpn_proposals/probs',
                          'fastrcnn_all_probs',
                          'fastrcnn_fg_probs',
                          'fastrcnn_fg_boxes',
                      ]))
    df = get_train_dataflow()
    df.reset_state()

    if os.path.isdir(output_dir):
        shutil.rmtree(output_dir)
    utils.fs.mkdir_p(output_dir)
    with tqdm.tqdm(total=nr_visualize) as pbar:
        for idx, dp in itertools.islice(enumerate(df.get_data()),
                                        nr_visualize):
            img, _, _, gt_boxes, gt_labels = dp

            rpn_boxes, rpn_scores, all_probs, fg_probs, fg_boxes = pred(
                img, gt_boxes, gt_labels)

            gt_viz = draw_annotation(img, gt_boxes, gt_labels)
            proposal_viz, good_proposals_ind = draw_proposal_recall(
                img, rpn_boxes, rpn_scores, gt_boxes)
            score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind],
                                         all_probs[good_proposals_ind])

            fg_boxes = clip_boxes(fg_boxes, img.shape[:2])
            fg_viz = draw_predictions(img, fg_boxes, fg_probs)

            results = nms_fastrcnn_results(fg_boxes, fg_probs)
            final_viz = draw_final_outputs(img, results)

            viz = tpviz.stack_patches(
                [gt_viz, proposal_viz, score_viz, fg_viz, final_viz], 2, 3)

            if os.environ.get('DISPLAY', None):
                tpviz.interactive_imshow(viz)
            cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
            pbar.update()
Example #29
def predict(pred_func, input_file):
    image_paths = []
    if os.path.isdir(input_file):
        for inp_file in os.listdir(input_file):
            image_paths.append(os.path.join(input_file, inp_file))
    for image_path in image_paths:

        path1, file1 = os.path.split(image_path)
        filename1, filename2 = os.path.splitext(file1)
        objectfile = open('results/%s.txt' % filename1, 'w')
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        start = time.time()
        results = detect_one_image(img, pred_func)
        end = time.time()
        print('Detection took {:.3f}s'.format(end - start))
        final = draw_final_outputs(img, results, objectfile)
        objectfile.close()  # close the per-image results file
        # viz = np.concatenate((img, final), axis=1)
        viz = final
        cv2.imwrite('outimages/%s.png' % filename1, viz)
Example #30
def do_predict(pred_func, input_file):
    img = imread(input_file)
    if len(img.shape) == 2:  # grayscale input: replicate to 3 channels
        img = np.expand_dims(img, axis=2)
        img = np.repeat(img, 3, axis=2)

    img = scipy.misc.imresize(img, (800, 800))  # NB: removed in SciPy >= 1.3, see note below

    results = predict_image(img, pred_func)
    print(len(results))
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    # for result in results:
    #     if result.class_id != 3:
    #         print(result)
    imsave('./output2/outputimg.tif', final)
    logger.info(
        "Inference output for {} written to outputimg.tif".format(input_file))
    return results
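scipy.misc.imresize was removed in SciPy 1.3, so on a modern environment the resize above needs a substitute; one option using OpenCV (a suggestion, not part of the original snippet):

    img = cv2.resize(img, (800, 800), interpolation=cv2.INTER_LINEAR)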
Example #31
def do_predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = predict_image(img, pred_func)
    if cfg.MODE_MASK:
        final = draw_final_outputs_blackwhite(img, results)
    else:
        final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    # fxl: originally wrote to a fixed output.png:
    #   cv2.imwrite("output.png", viz)
    #   logger.info("Inference output for {} written to output.png".format(input_file))
    base_name = os.path.basename(input_file)
    img_name = os.path.splitext(base_name)[0]
    result_file = "/content/drive/My Drive/Colab/tp/myoutput/predict_output/" + img_name + ".png"
    cv2.imwrite(result_file, viz)
    print(img_name, end='')  # the original used an undefined `img_number`; `img_name` is the likely intent
    print(". Inference for {} written to {}".format(base_name, result_file))
Example #32
def predict(pred_func, input_file):
    img = cv2.imread(input_file, cv2.IMREAD_COLOR)
    results = detect_one_image(img, pred_func)
    final = draw_final_outputs(img, results)
    viz = np.concatenate((img, final), axis=1)
    tpviz.interactive_imshow(viz)
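For reference, every snippet above assumes a ready-made pred_func; a minimal sketch of that wiring, modeled on Example #20 (the output tensor names and checkpoint path are assumptions that vary between these projects):

pred_func = OfflinePredictor(
    PredictConfig(model=Model(),
                  session_init=get_model_loader('/path/to/checkpoint'),
                  input_names=['image'],
                  output_names=['final_boxes', 'final_probs', 'final_labels']))
predict(pred_func, 'demo.jpg')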