Example #1
def recognize_from_video():
    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

    if args.video == '0':
        print('[INFO] Webcam mode is activated')
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():
            print("[ERROR] webcamera not found")
            sys.exit(1)
    else:
        # check_file_existance is assumed to abort the script when the file is
        # missing, so capture is only created from an existing file
        if check_file_existance(args.video):
            capture = cv2.VideoCapture(args.video)

    fig = create_figure()
    tight_layout = True

    while True:
        ret, frame = capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if not ret:
            continue

        input_image, resized_img = adjust_frame_size(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH
        )
        resized_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)

        if args.apply_rotate:
            rotation_angle = np.random.randint(360)
            rotated_img = generate_rotated_image(
                resized_img,
                rotation_angle,
                size=(IMAGE_HEIGHT, IMAGE_WIDTH),
                crop_center=True,
                crop_largest_rect=True
            )
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))
        else:
            rotation_angle = 0
            rotated_img = resized_img
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

        # inference
        preds_ailia = net.predict(input_data)

        # visualize
        predicted_angle = np.argmax(preds_ailia, axis=1)[0]
        plt = visualize(fig, rotated_img, rotation_angle, predicted_angle, tight_layout)
        plt.pause(.01)
        tight_layout = False

    capture.release()
    cv2.destroyAllWindows()
    print('Script finished successfully.')
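
This example relies on module-level names defined elsewhere in the demo script (MODEL_PATH, WEIGHT_PATH, IMAGE_HEIGHT, IMAGE_WIDTH, args, and the helpers create_figure, visualize, adjust_frame_size, generate_rotated_image, check_file_existance). As a rough illustration only, assuming those helpers live in the same module, it could be driven like this; the SimpleNamespace and the constant values are placeholders standing in for the demo's real argument parser and model files.

# Hypothetical driver for recognize_from_video(); illustrative sketch only.
from types import SimpleNamespace

MODEL_PATH = 'rotnet.onnx.prototxt'   # placeholder paths; substitute the
WEIGHT_PATH = 'rotnet.onnx'           # actual model/weight files of the demo
IMAGE_HEIGHT, IMAGE_WIDTH = 224, 224  # assumed network input resolution

# Only the fields that recognize_from_video() actually reads are set here:
# '0' selects the webcam branch, apply_rotate enables the random rotation.
args = SimpleNamespace(video='0', apply_rotate=True)

recognize_from_video()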
Example #2
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        org_img = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)

        if args.apply_rotate:
            rotation_angle = np.random.randint(360)
            rotated_img = generate_rotated_image(org_img,
                                                 rotation_angle,
                                                 size=(IMAGE_HEIGHT,
                                                       IMAGE_WIDTH),
                                                 crop_center=True,
                                                 crop_largest_rect=True)
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))
        else:
            rotation_angle = 0
            # cv2.resize expects (width, height)
            rotated_img = cv2.resize(org_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

        net.set_input_shape(input_data.shape)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            preds_ailia = net.predict(input_data)

        # visualize
        predicted_angle = np.argmax(preds_ailia, axis=1)[0]
        fig = create_figure()
        plt = visualize(fig, rotated_img, rotation_angle, predicted_angle)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        plt.savefig(savepath)

    logger.info('Script finished successfully.')
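
Example #2 reads several attributes from a global args object (input, savepath, apply_rotate, benchmark, env_id). The demo builds these through its shared utility helpers, but a plain argparse equivalent covering just the fields used above might look roughly like this; the flag names and defaults are assumptions.

import argparse

# Hypothetical parser reproducing only the options Example #2 consumes.
parser = argparse.ArgumentParser(description='RotNet rotation recognition demo')
parser.add_argument('-i', '--input', nargs='+', default=['input.jpg'],
                    help='input image path(s)')
parser.add_argument('-s', '--savepath', default='output.png',
                    help='path for the saved visualization')
parser.add_argument('--apply_rotate', action='store_true',
                    help='rotate the input by a random angle before inference')
parser.add_argument('-b', '--benchmark', action='store_true',
                    help='measure ailia processing time over 5 runs')
parser.add_argument('-e', '--env_id', type=int, default=-1,
                    help='ailia execution environment id')
args = parser.parse_args()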
Example #3
def recognize_from_image():
    # prepare input data
    org_img = cv2.cvtColor(cv2.imread(args.input), cv2.COLOR_BGR2RGB)

    if args.apply_rotate:
        rotation_angle = np.random.randint(360)
        rotated_img = generate_rotated_image(org_img,
                                             rotation_angle,
                                             size=(IMAGE_HEIGHT, IMAGE_WIDTH),
                                             crop_center=True,
                                             crop_largest_rect=True)
        input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))
    else:
        rotation_angle = 0
        # cv2.resize expects (width, height)
        rotated_img = cv2.resize(org_img, (IMAGE_WIDTH, IMAGE_HEIGHT))
        input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape(input_data.shape)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            preds_ailia = net.predict(input_data)
            end = int(round(time.time() * 1000))
            print(f'\tailia processing time {end - start} ms')
    else:
        preds_ailia = net.predict(input_data)

    # visualize
    predicted_angle = np.argmax(preds_ailia, axis=1)[0]
    plt = visualize(rotated_img, rotation_angle, predicted_angle)
    plt.savefig(args.savepath)

    print('Script finished successfully.')
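
All of the examples delegate the actual rotation to generate_rotated_image(image, angle, size, crop_center, crop_largest_rect), which is defined elsewhere in the demo. A simplified, self-contained stand-in matching the call signature used above could look like this; the crop_largest_rect branch here is a conservative approximation (a centered square that stays border-free for any angle), whereas the demo's real utility computes the exact largest inscribed rectangle.

import cv2
import numpy as np

def generate_rotated_image(image, angle, size=None,
                           crop_center=False, crop_largest_rect=False):
    # Hypothetical, simplified stand-in for the demo's helper of the same name.
    # Rotates `image` by `angle` degrees around its center, optionally crops a
    # border-free region, then resizes to `size` given as (height, width).
    h, w = image.shape[:2]
    matrix = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), angle, 1.0)
    rotated = cv2.warpAffine(image, matrix, (w, h))

    if crop_largest_rect:
        # Conservative crop: a centered square of side min(w, h)/sqrt(2) lies
        # inside the inscribed circle of the frame, so it contains no black
        # corners regardless of the rotation angle.
        side = int(min(w, h) / np.sqrt(2))
        y0, x0 = (h - side) // 2, (w - side) // 2
        rotated = rotated[y0:y0 + side, x0:x0 + side]
    elif crop_center:
        # Keep the centered square region at the original scale.
        side = min(w, h)
        y0, x0 = (h - side) // 2, (w - side) // 2
        rotated = rotated[y0:y0 + side, x0:x0 + side]

    if size is not None:
        # cv2.resize takes (width, height), while size is (height, width).
        rotated = cv2.resize(rotated, (size[1], size[0]))
    return rotated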
Example #4
def recognize_from_video():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)
    net.set_input_shape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        logger.warning(
            'currently, video results cannot be output correctly...')
        # TODO: DEBUG: shape
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    fig = create_figure()
    tight_layout = True

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        input_image, resized_img = webcamera_utils.adjust_frame_size(
            frame, IMAGE_HEIGHT, IMAGE_WIDTH)
        resized_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)

        if args.apply_rotate:
            rotation_angle = np.random.randint(360)
            rotated_img = generate_rotated_image(resized_img,
                                                 rotation_angle,
                                                 size=(IMAGE_HEIGHT,
                                                       IMAGE_WIDTH),
                                                 crop_center=True,
                                                 crop_largest_rect=True)
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))
        else:
            rotation_angle = 0
            rotated_img = resized_img
            input_data = rotated_img.reshape((1, IMAGE_HEIGHT, IMAGE_WIDTH, 3))

        # inference
        preds_ailia = net.predict(input_data)

        # visualize
        predicted_angle = np.argmax(preds_ailia, axis=1)[0]
        plt = visualize(fig, rotated_img, rotation_angle, predicted_angle,
                        tight_layout)
        plt.pause(.01)
        if not plt.get_fignums():
            break
        tight_layout = False

        # # save results
        # if writer is not None:
        #     writer.write(res_img)

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
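
Example #4 replaces the inline capture setup of Example #1 with webcamera_utils.get_capture and get_writer. A minimal sketch of what such helpers could do, mirroring the inline branch from Example #1 and plain OpenCV video writing, is shown below; the codec and frame rate are assumptions.

import sys
import cv2

def get_capture(video):
    # '0' selects the default webcam, anything else is treated as a file path
    # (mirrors the inline logic in Example #1; illustrative only).
    capture = cv2.VideoCapture(0 if video == '0' else video)
    if not capture.isOpened():
        print('[ERROR] failed to open video source')
        sys.exit(1)
    return capture

def get_writer(savepath, height, width, fps=20):
    # Plain OpenCV writer; codec and fps are assumptions, and the frame size
    # is passed to cv2.VideoWriter as (width, height).
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    return cv2.VideoWriter(savepath, fourcc, fps, (width, height))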