def test(epoch, model, input_path, output_path, f_score):
    """Run detection over every image in ``input_path``, write one
    ICDAR-style ``res_<name>.txt`` per image into ``output_path``, and
    checkpoint the model when the new f-score beats the previous best.

    Args:
        epoch: Current epoch index; only used for checkpoint context.
        model: Detection network; switched to eval mode here.
        input_path: Directory containing the evaluation images.
        output_path: Directory/prefix the result files are written under.
        f_score: Best f-score seen so far.

    Returns:
        The (possibly updated) best f-score.
    """
    model.eval()

    image_list = os.listdir(input_path)
    logger.info(
        "         ----------------------------------------------------------------"
    )
    logger.info("                    Starting Eval...")
    logger.info(
        "         ----------------------------------------------------------------"
    )
    for one_image in tqdm(image_list):
        image_path = os.path.join(input_path, one_image)
        img = Image.open(image_path)

        filename, _ = os.path.splitext(os.path.basename(one_image))
        # NOTE(review): plain string concatenation assumes output_path ends
        # with a path separator — confirm against callers.
        res_file = output_path + "res_" + filename + '.txt'

        boxes = detect(img, model, device)

        with open(res_file, 'w') as f:
            # An empty result file is still created when nothing is detected;
            # ICDAR-style evaluation scripts expect one file per image.
            if boxes is None:
                continue
            for box in boxes:
                points = np.reshape(np.array(box).astype(np.int32), -1)
                # First 8 flattened values are the 4 corners: x1,y1,...,x4,y4.
                f.write(','.join(map(str, points[:8])) + '\r\n')

    f_score_new = getresult(output_path)
    try:
        if f_score_new > f_score:
            # Unwrap DataParallel before saving so the checkpoint loads
            # without the 'module.' key prefix.
            state_dict = model.module.state_dict(
            ) if data_parallel else model.state_dict()
            # The original called 'best_model.pth'.format(epoch + 1) — a
            # no-op .format on a string with no placeholder; dropped.
            torch.save(state_dict,
                       os.path.join(args.workspace, 'best_model.pth'))
            f_score = f_score_new
    except Exception:
        # getresult may return a non-comparable value (e.g. None) or the
        # save may fail; report the raw score instead of killing training.
        # (Narrowed from a bare `except:` so KeyboardInterrupt etc. escape.)
        print(f_score_new)

    logger.info("\n")
    logger.info(
        "         ---------------------------------------------------------")
    logger.info(
        "                 current f_score: {:.3f} best_f_score: {:.3f}".format(
            f_score_new, f_score))
    logger.info(
        "         ---------------------------------------------------------")
    return f_score
# --- scraped-example separator: "Exemplo n.º 2" (not part of the code) ---
def test(model,args,ther_):
    """Evaluate the model on the ICDAR'17 val/test image set and write
    submission files.

    Writes one ``res_<name>.txt`` per image into a fresh submission
    directory under ``args.workspace``; optionally dumps visualizations.

    Args:
        model: Detection network; switched to eval mode here.
        args: Namespace read for workspace/eval_path/is_test/vis/vis_path/
            gt_name.
        ther_: Unused in the visible body — presumably a threshold; confirm
            against callers.

    NOTE(review): this function is truncated by the scrape at the
    ``f_score_new = getresult(...)`` line — the tail (and any return) is
    missing from this view.
    """
    model.eval()
    # Test mode reads the held-out test images and writes to a separate
    # submission directory; otherwise the validation split is used.
    if args.is_test:
        output_path = os.path.join(args.workspace, "17_submit_test")
        input_path = os.path.join(args.eval_path, "test_image")
    else:
        output_path = os.path.join(args.workspace, "17_submit")
        input_path = os.path.join(args.eval_path, "val_image")
    image_list = os.listdir(input_path)
    print("     ----------------------------------------------------------------")
    print("                           Starting Eval...")
    print("     ----------------------------------------------------------------")

    # Start from an empty submission directory every run.
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    # for size in range(640,2048,32):
    #     print("short_line:",size)
    for one_image in tqdm(image_list):
        image_path = os.path.join(input_path, one_image)
        img = Image.open(image_path).convert('RGB')
        # Separate BGR copy for OpenCV visualization drawing.
        orign_img = cv2.imread(image_path)
        filename, file_ext = os.path.splitext(os.path.basename(one_image))
        if args.is_test:
            # Test filenames carry a "ts_" prefix that the submission
            # format does not want.
            filename = filename.split("ts_")[-1]
            res_file = output_path + "/res_" + filename + '.txt'
        else:
            res_file = output_path + "/res_" + filename + '.txt'

        vis_file = args.vis_path + filename + '.jpg'
        boxes = detect_17(img, model, device)

        with open(res_file, 'w') as f:
            # The empty result file is still created when nothing is
            # detected, as the evaluation script expects.
            if boxes is None:
                continue
            for i, box in enumerate(boxes):
                poly = np.array(box).astype(np.int32)
                points = np.reshape(poly, -1)
                # Test submissions append a confidence column ("1.0");
                # validation files are the bare 8 corner coordinates.
                if args.is_test:
                    strResult = ','.join(
                        [str(points[0]), str(points[1]), str(points[2]), str(points[3]), str(points[4]), str(points[5]),
                         str(points[6]), str(points[7]), str("1.0")]) + '\r\n'
                else:
                    strResult = ','.join(
                        [str(points[0]), str(points[1]), str(points[2]), str(points[3]), str(points[4]), str(points[5]),
                         str(points[6]), str(points[7])]) + '\r\n'

                f.write(strResult)
            if args.vis:
                for bbox in boxes:
                    # bbox = bbox / scale.repeat(int(len(bbox) / 2))
                    # NOTE(review): np.int was removed in NumPy 1.24 —
                    # this raises AttributeError on modern NumPy.
                    bbox = np.array(bbox,np.int)
                    cv2.drawContours(orign_img, [bbox[:8].reshape(int(bbox.shape[0] / 2), 2)], -1, (0, 0, 255), 2)
                cv2.imwrite(vis_file, orign_img)
    if not args.is_test:
        f_score_new = getresult(output_path,args.gt_name)
# --- scraped-example separator: "Exemplo n.º 3" (not part of the code) ---
        filename, file_ext = os.path.splitext(os.path.basename(one_image))
        res_file = res_img + "res_" + filename + '.txt'

        boxes, score = detect(img, model, device)

        with open(res_file, 'w') as f:
            if boxes is None:
                continue
            for i, box in enumerate(boxes):
                poly = np.array(box).astype(np.int32)
                points = np.reshape(poly, -1)
                strResult = ','.join([
                    str(points[0]),
                    str(points[1]),
                    str(points[2]),
                    str(points[3]),
                    str(points[4]),
                    str(points[5]),
                    str(points[6]),
                    str(points[7])
                ]) + '\r\n'

                cv2.drawContours(img_show, [poly[:8].reshape(4, 2)], -1,
                                 (0, 0, 255), 2)
                f.write(strResult)
        cv2.imwrite(output_path, img_show)
    f_score = getresult(res_img)
    print("f_score:", f_score)
    # plot_img = plot_boxes(img, boxes)
    # plot_img.save(res_img)