Example #1
def main():
    info = {
        ("celeba", 256):
        (WEIGHT_CELEBA256_PATH, MODEL_CELEBA256_PATH, (256, 256)),
        ("places", 256):
        (WEIGHT_PLACES256_PATH, MODEL_PLACES256_PATH, (256, 256)),
        ("places", 512):
        (WEIGHT_PLACES512_PATH, MODEL_PLACES512_PATH, (512, 512)),
        ("places", 1024):
        (WEIGHT_PLACES1024_PATH, MODEL_PLACES1024_PATH, (1024, 1024))
    }
    key = (args.model, args.img_res)
    if key not in info:
        logger.error("(MODEL = %s, IMG_RESOLUTION = %s) is unmatch." % key)
        logger.info("appropriate settings:\n"
                    "\t(MODEL = celeba, IMG_RESOLUTION = 256)\n"
                    "\t(MODEL = places, IMG_RESOLUTION = 256 or 512 or 1024)")
        sys.exit(-1)

    if "FP16" in ailia.get_environment(
            args.env_id).props or platform.system() == 'Darwin':
        logger.warning('This model does not work on FP16; falling back to CPU mode.')
        args.env_id = 0

    # model files check and download
    weight_path, model_path, img_shape = info[key]
    check_and_download_models(weight_path, model_path, REMOTE_PATH)

    # net initialize
    net = ailia.Net(model_path, weight_path, env_id=args.env_id)

    recognize_from_image(net, img_shape)
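
The snippet reads `args.model`, `args.img_res`, and `args.env_id` from a module-level `args` object that is not shown. A minimal argparse sketch of the options it implies (flag names and defaults are assumptions, not taken from the original script):

import argparse

# Hypothetical parser exposing the attributes used above; names and defaults
# are assumptions for illustration only.
parser = argparse.ArgumentParser(description='inpainting sample')
parser.add_argument('--model', default='places', choices=['celeba', 'places'])
parser.add_argument('--img_res', type=int, default=256,
                    choices=[256, 512, 1024])
parser.add_argument('--env_id', type=int, default=0,
                    help='ailia execution environment id')
args = parser.parse_args()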
Example #2
def recognize_from_image():
    # prepare input data
    image = Image.open(args.input)
    input_data = preprocess(image)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    if env_id != -1 and ailia.get_environment(env_id).props == "LOWPOWER":
        # This model requires huge GPU memory, so fall back to CPU mode
        env_id = -1
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape(input_data.shape)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            boxes, labels, scores, masks = net.predict([input_data])
            end = int(round(time.time() * 1000))
            print(f'\tailia processing time {end - start} ms')
    else:
        boxes, labels, scores, masks = net.predict([input_data])

    # postprocessing
    fig, ax = create_figure()
    display_objdetect_image(
        fig, ax, image, boxes, labels, scores, masks, savepath=args.savepath
    )
    print('Script finished successfully.')
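
`preprocess` is not shown in this example. As a minimal sketch only (the real transform for this model likely resizes, pads, and normalizes differently), a PIL image is converted to the float CHW array that `net.predict` expects:

import numpy as np  # assumption: not shown in the original snippet

def preprocess(image):
    # Minimal sketch: PIL RGB image -> float32 CHW array.
    data = np.asarray(image, dtype=np.float32)  # HWC, RGB
    return data.transpose(2, 0, 1)              # CHW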
Example #3
def recognize_from_video():
    # net initialize
    # This model requires huge GPU memory, so fall back to CPU mode
    env_id = args.env_id
    if env_id != -1 and ailia.get_environment(env_id).props == "LOWPOWER":
        env_id = -1
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        print(
            '[WARNING] currently, video results cannot be output correctly...'
        )
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    fig, ax = create_figure()

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        input_data = preprocess(frame)
        net.set_input_shape(input_data.shape)

        boxes, labels, scores, masks = net.predict([input_data])

        ax.clear()
        display_objdetect_image(fig, ax, frame, boxes, labels, scores, masks)
        plt.pause(.01)
        if not plt.get_fignums():
            break

        # save results
        # if writer is not None:
        #     writer.write(frame)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    print('Script finished successfully.')
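
The writer call is commented out above because the detections are drawn on the matplotlib figure rather than on `frame` itself, so writing `frame` would only save the raw input. A rough sketch of one way to save the rendered figure instead, assuming an Agg-style backend and `numpy` imported as `np` (neither is part of the original example):

import numpy as np  # assumption: not imported in the original snippet

def write_rendered_frame(writer, fig):
    # Rasterize the matplotlib canvas (Agg-style backend assumed) and write the
    # rendered image, with boxes and masks drawn, to the cv2 VideoWriter.
    fig.canvas.draw()
    rgba = np.asarray(fig.canvas.buffer_rgba())
    writer.write(cv2.cvtColor(rgba, cv2.COLOR_RGBA2BGR))

Note that the canvas size may differ from (f_w, f_h), so the writer would need to be created with the canvas dimensions for the output video to be valid.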
Example #4
def main():
    # model files check and download
    check_and_download_models(WEIGHT_PATH, MODEL_PATH, REMOTE_PATH)

    if "FP16" in ailia.get_environment(
            args.env_id).props or platform.system() == 'Darwin':
        logger.warning('This model does not work on FP16; falling back to CPU mode.')
        args.env_id = 0

    if args.video is not None:
        # video mode
        recognize_from_video()
    else:
        # image mode
        recognize_from_image()
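
The FP16 check relies on the `props` string of the selected ailia environment. A short sketch, using only the ailia calls that already appear in these examples, that lists the available environments and flags FP16-only ones when choosing an `env_id` manually:

# Sketch: enumerate ailia environments and report which ones advertise FP16.
for idx in range(ailia.get_environment_count()):
    env = ailia.get_environment(idx)
    print(f'env[{idx}] name={env.name} props={env.props} '
          f'fp16={"FP16" in env.props}')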
Example #5
def recognize_from_video():
    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')

    # This model requires huge GPU memory, so fall back to CPU mode
    if env_id != -1 and ailia.get_environment(env_id).props == "LOWPOWER":
        env_id = -1
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    if args.video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        if check_file_existance(args.video):
            capture = cv2.VideoCapture(args.video)
        else:
            # avoid an undefined `capture` below when the input video is missing
            sys.exit(1)

    fig, ax = create_figure()

    while True:
        ret, frame = capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if not ret:
            continue

        frame = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        input_data = preprocess(frame)
        net.set_input_shape(input_data.shape)

        boxes, labels, scores, masks = net.predict([input_data])

        ax.clear()
        display_objdetect_image(fig, ax, frame, boxes, labels, scores, masks)
        plt.pause(.01)

    capture.release()
    cv2.destroyAllWindows()
    print('Script finished successfully.')
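
Example #3 wraps this capture setup in `webcamera_utils.get_capture`. As a rough sketch of an equivalent helper based on the inline code above (this is not the actual `webcamera_utils` implementation):

def get_capture_sketch(video):
    # Hypothetical helper mirroring the inline capture setup above.
    if video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print('[ERROR] webcamera not found')
            sys.exit(1)
    else:
        if not check_file_existance(video):
            sys.exit(1)
        capture = cv2.VideoCapture(video)
    return capture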
Example #6
def update_parser(parser, check_input_type=True, large_model=False):
    """Default check or update configurations should be placed here

    Parameters
    ----------
    parser : ArgumentParser()

    Returns
    -------
    args : ArgumentParser()
        (parse_args() will be done here)
    """
    args = parser.parse_args()

    # -------------------------------------------------------------------------
    # 0. logger level update
    if args.debug:
        logger.setLevel(DEBUG)

    # -------------------------------------------------------------------------
    # 1. check env_id count
    if AILIA_EXIST:
        count = ailia.get_environment_count()
        if count <= args.env_id:
            logger.error(f'specified env_id: {args.env_id} cannot be found.')
            logger.info('env_id updated to 0')
            args.env_id = 0

        if large_model:
            if (args.env_id == ailia.get_gpu_environment_id()
                    and ailia.get_environment(args.env_id).props == "LOWPOWER"):
                args.env_id = 0  # cpu
                logger.warning(
                    'This model requires huge GPU memory, so falling back to cpu mode'
                )

        if args.env_list:
            for idx in range(count):
                env = ailia.get_environment(idx)
                logger.info("  env[" + str(idx) + "]=" + str(env))

        if args.env_id == ailia.ENVIRONMENT_AUTO:
            args.env_id = ailia.get_gpu_environment_id()
            if args.env_id == ailia.ENVIRONMENT_AUTO:
                logger.info('env_id updated to 0')
                args.env_id = 0
            else:
                logger.info('env_id updated to ' + str(args.env_id) +
                            '(from get_gpu_environment_id())')

        logger.info(f'env_id: {args.env_id}')

        env = ailia.get_environment(args.env_id)
        logger.info(f'{env.name}')

    # -------------------------------------------------------------------------
    # 2. update input
    if args.video is not None:
        args.ftype = 'video'
        args.input = None  # force video mode

    if args.input is None:
        # TODO: the relationship between args.video and args.input is ambiguous
        # input is None --> presumably video mode
        pass
    elif isinstance(args.input, list):
        # LIST --> nothing will be changed here.
        pass
    elif os.path.isdir(args.input):
        # Directory Path --> generate list of inputs
        files_grabbed = []
        in_dir = args.input
        for extension in EXTENSIONS[args.ftype]:
            files_grabbed.extend(glob.glob(os.path.join(in_dir, extension)))
        logger.info(f'{len(files_grabbed)} {args.ftype} files found!')

        args.input = sorted(files_grabbed)

        # create save directory
        if args.savepath is not None:
            if '.' in args.savepath:
                logger.warning('Please specify a save directory as --savepath '
                               'if you specified a directory for --input')
                logger.info(f'[{in_dir}_results] directory will be created')
                if in_dir[-1] == '/':
                    in_dir = in_dir[:-1]
                args.savepath = in_dir + '_results'
            os.makedirs(args.savepath, exist_ok=True)
            logger.info(f'output saving directory: {args.savepath}')

    elif os.path.isfile(args.input):
        args.input = [args.input]
    else:
        if check_input_type:
            logger.error('specified input is neither a file path nor a directory path')
            sys.exit(0)

    # -------------------------------------------------------------------------
    return args
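
A typical way to drive `update_parser` is to build an `ArgumentParser` exposing the attributes it reads (`input`, `video`, `savepath`, `env_id`, `env_list`, `debug`, `ftype`) and pass it in. The flag spellings and defaults below are assumptions for illustration:

import argparse

# Hypothetical parser; real scripts usually build this with a shared helper.
parser = argparse.ArgumentParser(description='sample script')
parser.add_argument('-i', '--input', default='input.png')
parser.add_argument('-v', '--video', default=None)
parser.add_argument('-s', '--savepath', default=None)
parser.add_argument('-e', '--env_id', type=int, default=ailia.ENVIRONMENT_AUTO)
parser.add_argument('--env_list', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--ftype', default='image', choices=['image', 'video'])
args = update_parser(parser)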
Example #7
def main():
    # This model requires huge GPU memory, so fall back to CPU mode
    env_id = args.env_id
    if env_id != -1 and ailia.get_environment(env_id).props == "LOWPOWER":
        env_id = -1

    # get default config value and merge args
    config = get_default_config()
    config['det_limit_side_len'] = args.det_limit_side_len
    config['det_limit_type'] = args.det_limit_type
    lang_tmp = args.language.lower()
    if lang_tmp in ('japanese', 'jpn', 'jp'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_JPN_MBL, DICT_PATH_REC_JPN_MBL,
                                WEIGHT_PATH_CLS_CHN)
        elif args.case == 'server':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_JPN_SVR, DICT_PATH_REC_JPN_SVR,
                                WEIGHT_PATH_CLS_CHN)
    elif lang_tmp in ('english', 'eng', 'en'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_ENG_MBL, DICT_PATH_REC_ENG_MBL,
                                WEIGHT_PATH_CLS_CHN)
    elif lang_tmp in ('chinese', 'chi', 'ch'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_CHN_MBL, DICT_PATH_REC_CHN_MBL,
                                WEIGHT_PATH_CLS_CHN)
        elif args.case == 'server':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_CHN_SVR, DICT_PATH_REC_CHN_SVR,
                                WEIGHT_PATH_CLS_CHN)
    elif lang_tmp in ('german', 'ger', 'ge'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_GER_MBL, DICT_PATH_REC_GER_MBL,
                                WEIGHT_PATH_CLS_CHN)
    elif lang_tmp in ('french', 'fre', 'fr'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_FRE_MBL, DICT_PATH_REC_FRE_MBL,
                                WEIGHT_PATH_CLS_CHN)
    elif lang_tmp in ('korean', 'kor', 'ko'):
        if args.case == 'mobile':
            config = set_config(config, WEIGHT_PATH_DET_CHN,
                                WEIGHT_PATH_REC_KOR_MBL, DICT_PATH_REC_KOR_MBL,
                                WEIGHT_PATH_CLS_CHN)

    # model files check and download
    weight_path_det = config['det_model_path']
    model_path_det = config['det_model_path'] + '.prototxt'
    check_and_download_models(weight_path_det, model_path_det, REMOTE_PATH)
    weight_path_cls = config['cls_model_path']
    model_path_cls = config['cls_model_path'] + '.prototxt'
    check_and_download_models(weight_path_cls, model_path_cls, REMOTE_PATH)
    weight_path_rec = config['rec_model_path']
    model_path_rec = config['rec_model_path'] + '.prototxt'
    check_and_download_models(weight_path_rec, model_path_rec, REMOTE_PATH)

    # build ocr class
    text_sys = TextSystem(config, env_id)

    if args.video is not None:
        recognize_from_video(config, text_sys)
    else:
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(args.benchmark_count):
                start = int(round(time.time() * 1000))
                recognize_from_image(config, text_sys)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            recognize_from_image(config, text_sys)
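
The language/case dispatch above could also be written as a lookup table, in the spirit of the `info` dict in Example #1. A sketch using the same constants as the snippet (the short language keys and table names are arbitrary):

# Sketch: table-driven variant of the language/case dispatch above.
LANG_ALIASES = {
    'japanese': 'jp', 'jpn': 'jp', 'jp': 'jp',
    'english': 'en', 'eng': 'en', 'en': 'en',
    'chinese': 'ch', 'chi': 'ch', 'ch': 'ch',
    'german': 'ge', 'ger': 'ge', 'ge': 'ge',
    'french': 'fr', 'fre': 'fr', 'fr': 'fr',
    'korean': 'ko', 'kor': 'ko', 'ko': 'ko',
}
REC_SETTINGS = {
    ('jp', 'mobile'): (WEIGHT_PATH_REC_JPN_MBL, DICT_PATH_REC_JPN_MBL),
    ('jp', 'server'): (WEIGHT_PATH_REC_JPN_SVR, DICT_PATH_REC_JPN_SVR),
    ('en', 'mobile'): (WEIGHT_PATH_REC_ENG_MBL, DICT_PATH_REC_ENG_MBL),
    ('ch', 'mobile'): (WEIGHT_PATH_REC_CHN_MBL, DICT_PATH_REC_CHN_MBL),
    ('ch', 'server'): (WEIGHT_PATH_REC_CHN_SVR, DICT_PATH_REC_CHN_SVR),
    ('ge', 'mobile'): (WEIGHT_PATH_REC_GER_MBL, DICT_PATH_REC_GER_MBL),
    ('fr', 'mobile'): (WEIGHT_PATH_REC_FRE_MBL, DICT_PATH_REC_FRE_MBL),
    ('ko', 'mobile'): (WEIGHT_PATH_REC_KOR_MBL, DICT_PATH_REC_KOR_MBL),
}

key = (LANG_ALIASES.get(args.language.lower()), args.case)
if key in REC_SETTINGS:
    weight_rec, dict_rec = REC_SETTINGS[key]
    config = set_config(config, WEIGHT_PATH_DET_CHN,
                        weight_rec, dict_rec, WEIGHT_PATH_CLS_CHN)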