Example 1
def recognize_from_image(filename, det_model, rec_model, ga_model):
    # prepare input data
    img = load_image(filename)
    logger.debug(f'input image shape: {img.shape}')

    # load identities
    ident_names, ident_feats = load_identities(rec_model)

    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            faces = predict(img, det_model, rec_model, ga_model)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        faces = predict(img, det_model, rec_model, ga_model)

    faces = face_identification(faces, ident_feats)

    # plot result
    res_img = draw_detection(img, faces, ident_names)

    # save result
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, res_img)
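
Note: load_identities and face_identification are helpers defined elsewhere in the sample. A minimal sketch of the matching step, assuming each detected face carries an embedding vector and identities are compared by cosine similarity (the attribute name, threshold, and return convention here are assumptions, not the sample's actual code):

import numpy as np

def face_identification_sketch(faces, ident_feats, sim_threshold=0.3):
    # Normalize the identity gallery once (rows are identity feature vectors).
    gallery = ident_feats / np.linalg.norm(ident_feats, axis=1, keepdims=True)
    matches = []
    for face in faces:
        # Cosine similarity between this face's embedding and every identity.
        feat = face.embedding / np.linalg.norm(face.embedding)
        sims = gallery @ feat
        best = int(np.argmax(sims))
        # -1 marks an unknown face that misses the (assumed) threshold.
        matches.append(best if sims[best] >= sim_threshold else -1)
    return matches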
Example 2
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        img = cv2.imread(image_path)
        logger.debug(f'input image shape: {img.shape}')
        input_img = preprocess(img, True)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                d1, d2, d3, d4, d5, d6, d7 = net.predict(
                    {'input.1': input_img}
                )
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            d1, d2, d3, d4, d5, d6, d7 = net.predict({'input.1': input_img})

        out_img = post_process(d1)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, out_img)
    logger.info('Script finished successfully.')
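
The 5-iteration timing block above reappears in almost every example on this page. A hedged sketch of how it could be pulled into one reusable helper (the helper name and usage are mine, not part of the samples; it assumes the module-level logger the samples already use):

import time

def run_with_benchmark(fn, count=5):
    # Run fn() `count` times, log per-run latency, and return the last result.
    result = None
    for _ in range(count):
        start = int(round(time.time() * 1000))
        result = fn()
        end = int(round(time.time() * 1000))
        logger.info(f'\tailia processing time {end - start} ms')
    return result

# Possible usage (hypothetical):
# outputs = run_with_benchmark(lambda: net.predict({'input.1': input_img})) \
#     if args.benchmark else net.predict({'input.1': input_img})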
Example 3
def recognize_from_image(filename, detector):
    # prepare input data
    img_0 = load_image(filename)
    logger.debug(f'input image shape: {img_0.shape}')

    img = cv2.cvtColor(img_0, cv2.COLOR_BGRA2BGR)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            parsing_result = detect_objects(img, detector)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        parsing_result = detect_objects(img, detector)

    output_img = Image.fromarray(np.asarray(parsing_result, dtype=np.uint8))
    palette = get_palette(len(CATEGORY))
    output_img.putpalette(palette)
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    output_img.save(savepath)
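
get_palette is defined elsewhere; segmentation samples commonly build a PASCAL-VOC-style palette bit by bit, as in this sketch (which may differ from the implementation actually used here):

def get_palette_sketch(num_classes):
    # Expand each class index, three bits at a time, into an RGB triple.
    palette = [0] * (num_classes * 3)
    for label in range(num_classes):
        lab, i = label, 0
        while lab:
            palette[label * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[label * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[label * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette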
Example 4
def recognize_from_image():
    estimator = GazeEstimator(args.include_iris, args.include_head_pose)

    # input image loop
    for image_path in args.input:
        logger.info(image_path)
        src_img = cv2.imread(image_path)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(5):
                start = int(round(time.time() * 1000))
                img_draw = estimator.predict_and_draw(src_img, args.draw_iris,
                                                      args.draw_head_pose)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            img_draw = estimator.predict_and_draw(src_img, args.draw_iris,
                                                  args.draw_head_pose)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, img_draw)
    logger.info('Script finished successfully.')
Example 5
def recognize_from_image(filenames, net):
    for filename in filenames:
        # prepare input data
        img = load_image(filename)
        logger.info(f'input image shape: {img.shape}')

        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                bboxes = predict(img, net)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            bboxes = predict(img, net)

        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        res_img = draw_bbox(img, bboxes)

        # plot result
        savepath = get_savepath(args.savepath, filename)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, res_img)
    logger.info('Script finished successfully.')
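
draw_bbox is not shown here; a minimal OpenCV sketch under the assumption that each bbox is an (x1, y1, x2, y2, score) tuple in pixel coordinates (the real helper may also draw class names):

import cv2

def draw_bbox_sketch(img, bboxes, color=(0, 255, 0)):
    for x1, y1, x2, y2, score in bboxes:
        # Rectangle plus the confidence score just above the top-left corner.
        cv2.rectangle(img, (int(x1), int(y1)), (int(x2), int(y2)), color, 2)
        cv2.putText(img, f'{score:.2f}', (int(x1), max(int(y1) - 4, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1, cv2.LINE_AA)
    return img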
Example 6
def enhance_image():
    # net initialize (once, outside the image loop)
    model = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)
    upsampler = RealESRGAN(model)

    # input image loop
    for image_path in args.input:
        # prepare input data
        img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        img = cv2.resize(img, dsize=(H, W))

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                output = upsampler.enhance(img)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            output = upsampler.enhance(img)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, output)

    logger.info('Script finished successfully.')
Example 7
def recognize_from_image(filename, net):
    # prepare input data
    img = load_image(filename)
    logger.debug(f'input image shape: {img.shape}')

    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            keypoints = predict(img, net)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        keypoints = predict(img, net)
    """
    plot result
    """
    res_img = draw_keypoints(img, keypoints)
    # cv2.imwrite(args.savepath, res_img)
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, res_img)
Example 8
def recognize_from_video(filename, net):
    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        writer = webcamera_utils.get_writer(args.savepath, f_h, f_w)
    else:
        writer = None

    while True:
        ret, frame = capture.read()
        if cv2.waitKey(1) & 0xFF == ord('q') or not ret:
            break

        x = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        x = preprocess(x)
        preds_ailia = net.predict(x)
        search_gallery(net, preds_ailia, x[0].transpose(1, 2, 0))

        # save results
        if writer is not None:
            savepath = get_savepath(args.savepath, filename)
            plt.savefig(savepath, bbox_inches='tight')
            #writer.write(res_img)
        plt.show()
        #break

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
Example 9
def recognize_from_image(filename, detector):
    # load input image
    img = load_image(filename)
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            boxes, scores, cls_inds = detect_objects(img, detector)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        boxes, scores, cls_inds = detect_objects(img, detector)

    try:
        logger.info('\n'.join([
            'pos:{}, ids:{}, score:{:.3f}'.format(
                '(%.1f,%.1f,%.1f,%.1f)' % (box[0], box[1], box[2], box[3]),
                COCO_CATEGORY[int(obj_cls)], score)
            for box, obj_cls, score in zip(boxes, cls_inds, scores)
        ]))
    except Exception:
        # logging is best-effort only; ignore formatting errors
        pass

    # show image
    im2show = draw_detection(img, boxes, scores, cls_inds)

    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, im2show)
Example 10
def recognize_from_image():
    # net initialize
    pose = ailia.PoseEstimator(MODEL_PATH,
                               WEIGHT_PATH,
                               env_id=args.env_id,
                               algorithm=ALGORITHM)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        src_img = cv2.imread(image_path)
        input_image = load_image(image_path, (IMAGE_HEIGHT, IMAGE_WIDTH),
                                 normalize_type='None')
        input_data = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGRA)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                _ = pose.compute(input_data)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            _ = pose.compute(input_data)

        # post-processing
        count = pose.get_object_count()
        logger.info(f'person_count={count}')
        display_result(src_img, pose)
        cv2.imwrite(get_savepath(args.savepath, image_path), src_img)
    logger.info('Script finished successfully.')
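
display_result is defined elsewhere; ailia's pose samples typically iterate the detected people with pose.get_object_pose(idx) and draw the normalized keypoints, roughly as below. Treat the accessor and attribute names as assumptions taken from other ailia samples, not from this one:

import cv2

def display_result_sketch(img, pose, score_threshold=0.2):
    h, w = img.shape[:2]
    for idx in range(pose.get_object_count()):
        person = pose.get_object_pose(idx)
        for point in person.points:
            # Keypoints are assumed normalized to [0, 1]; skip low-score ones.
            if point.score < score_threshold:
                continue
            cv2.circle(img, (int(point.x * w), int(point.y * h)),
                       3, (0, 255, 0), -1)
    return img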
Example 11
def recognize_from_image(filename, net):
    # prepare input data
    img = load_image(filename)
    #logger.info(f'input image shape: {img.shape}')
    img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)
    img = preprocess(img)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            preds_ailia = net.predict(img)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        preds_ailia = net.predict(img)

    #logger.info(f'output shape: {preds_ailia.shape}')
    search_gallery(net, preds_ailia, filename)

    savepath = get_savepath(args.savepath, filename)
    plt.savefig(savepath, bbox_inches='tight')
    logger.info(f'saved at : {savepath}')
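
search_gallery is defined elsewhere; the core of such re-identification samples is usually a cosine-similarity ranking of the query feature against stored gallery features, sketched here under that assumption (names are mine):

import numpy as np

def rank_gallery_sketch(query_feat, gallery_feats, top_k=5):
    # L2-normalize and sort gallery entries by cosine similarity, descending.
    q = query_feat.reshape(-1)
    q = q / np.linalg.norm(q)
    g = gallery_feats / np.linalg.norm(gallery_feats, axis=1, keepdims=True)
    sims = g @ q
    order = np.argsort(-sims)[:top_k]
    return order, sims[order]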
Example 12
def recognize_from_image():
    # Input image loop
    for image_path in args.input:
        logger.info(image_path)

        # Prepare input data.
        dataset = _prepare_data(args, image_path=image_path)

        # Initialize net.
        net = _initialize_net(args)

        # Inference
        logger.info("Start inference...")
        if args.benchmark:
            logger.info("BENCHMARK mode")
            for i in range(5):
                start = int(round(time.time() * 1000))
                depth_pred_col = _estimate(dataset[0], net, args)
                end = int(round(time.time() * 1000))
                logger.info(f"\tailia processing time {end - start} ms")
        else:
            depth_pred_col = _estimate(dataset[0], net, args)

        filepath = get_savepath(args.savepath, image_path, ext=".png")
        logger.info(f"saved at : {filepath}")
        fast_depth_utils.save_image(depth_pred_col, filepath)
    logger.info("Script finished successfully.")
Example 13
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        img_input = cv2.imread(image_path)

        # inference
        logger.info('Start inference...')
        logger.warning(
            'Inference using CPU because model accuracy is low on GPU.')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(args.benchmark_count):
                start = int(round(time.time() * 1000))
                preds_img = gradio_wrapper_for_LSD(img_input, net)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            preds_img = gradio_wrapper_for_LSD(img_input, net)

        # postprocessing
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, preds_img)

    logger.info('Script finished successfully.')
Example 14
def recognize_from_image(img_path, net_maskrcnn, net_root, net_pose):
    # input image loop
    for image_path in img_path:
        # prepare input data
        logger.info(image_path)
        src_img = cv2.imread(image_path)

        # load image for pposenet
        original_img = cv2.imread(image_path)

        # cast to pillow for mask r-cnn
        image = Image.fromarray(original_img.copy()[:, :, ::-1])

        # exec mask r-cnn
        bbox_list = maskrcnn_to_image(image=image, net_maskrcnn=net_maskrcnn)
        bbox_list[:, 2] = bbox_list[:, 2] - bbox_list[:, 0]
        bbox_list[:, 3] = bbox_list[:, 3] - bbox_list[:, 1]
        # print('bbox_list =\n', (bbox_list).astype(np.int))

        # exec posenet
        vis_img = posenet_to_image(original_img=original_img,
                                   bbox_list=bbox_list,
                                   net_root=net_root,
                                   net_pose=net_pose)

        # output image
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, vis_img)

    logger.info('finished processing and wrote results to %s!' % args.savepath)
Example 15
def recognize_from_image(net):
    # input image loop
    for image_path in args.input:
        logger.info(image_path)

        # prepare input data
        img = load_image(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            total_time_estimation = 0
            for i in range(args.benchmark_count):
                start = int(round(time.time() * 1000))
                out_img = predict(net, img)
                end = int(round(time.time() * 1000))
                estimation_time = (end - start)

                # Logging
                logger.info(f'\tailia processing estimation time {estimation_time} ms')
                if i != 0:
                    total_time_estimation = total_time_estimation + estimation_time

            logger.info(f'\taverage time estimation {total_time_estimation / (args.benchmark_count - 1)} ms')
        else:
            out_img = predict(net, img)

        # plot result
        savepath = get_savepath(args.savepath, image_path, ext='.png')
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, out_img)

    logger.info('Script finished successfully.')
Example 16
def recognize_from_video(detector):
    capture = webcamera_utils.get_capture(args.video)

    # create video writer if savepath is specified as video format
    if args.savepath != SAVE_IMAGE_PATH:
        f_h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        f_w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        save_h, save_w = f_h, f_w
        writer = webcamera_utils.get_writer(args.savepath, save_h, save_w)
    else:
        writer = None

    if args.write_prediction:
        frame_count = 0
        frame_digit = int(
            math.log10(capture.get(cv2.CAP_PROP_FRAME_COUNT)) + 1)
        video_name = os.path.splitext(os.path.basename(args.video))[0]

    while True:
        ret, frame = capture.read()
        if (cv2.waitKey(1) & 0xFF == ord('q')) or not ret:
            break

        raw_img = frame
        if args.detector:
            detector.compute(raw_img, args.threshold, args.iou)
            res_img = plot_results(detector, raw_img, COCO_CATEGORY)
            detect_object = detector
        else:
            img, ratio = preprocess(raw_img, (HEIGHT, WIDTH))
            output = detector.run(img[None, :, :, :])
            predictions = postprocess(output[0], (HEIGHT, WIDTH))[0]
            detect_object = predictions_to_object(predictions, raw_img, ratio,
                                                  args.iou, args.threshold)
            detect_object = reverse_letterbox(
                detect_object, raw_img, (raw_img.shape[0], raw_img.shape[1]))
            res_img = plot_results(detect_object, raw_img, COCO_CATEGORY)
        cv2.imshow('frame', res_img)

        # save results
        if writer is not None:
            writer.write(res_img)

        # write prediction
        if args.write_prediction:
            savepath = get_savepath(
                args.savepath, video_name,
                post_fix='_%s_res' % str(frame_count).zfill(frame_digit),
                ext='.png')
            pred_file = '%s.txt' % savepath.rsplit('.', 1)[0]
            write_predictions(pred_file, detect_object, frame, COCO_CATEGORY)
            frame_count += 1

    capture.release()
    cv2.destroyAllWindows()
    if writer is not None:
        writer.release()
    logger.info('Script finished successfully.')
Example 17
def recognize_from_image(filename, net):
    # prepare input data
    img = load_image(filename)
    logger.debug(f'input image shape: {img.shape}')

    img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            out_mask = predict(img, net)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        out_mask = predict(img, net)

    if not args.orig_size:
        img = cv2.resize(img, (IMAGE_WIDTH, IMAGE_HEIGHT))
    res_img = np.ones(img.shape, np.uint8) * 255
    res_img[out_mask] = img[out_mask]

    # plot result
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, res_img)
    logger.info('Script finished successfully.')
Example 18
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        org_img = load_image(image_path, (IMAGE_HEIGHT, IMAGE_WIDTH))

        input_data = load_image(image_path, (IMAGE_HEIGHT, IMAGE_WIDTH),
                                normalize_type='127.5',
                                gen_input_ailia=True)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                preds_ailia = net.predict([input_data])
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            preds_ailia = net.predict([input_data])

        # post-processing
        detections = but.postprocess(preds_ailia)

        # generate detections
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        for detection in detections:
            but.plot_detections(org_img, detection, save_image_path=savepath)
    logger.info('Script finished successfully.')
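
load_image above comes from the ailia-models image utilities: judging by how the samples on this page use it, it returns a BGRA image, optionally resized and normalized, and with gen_input_ailia=True a batched NCHW float array. A simplified sketch of that behavior (an approximation, not the utility's actual code; channel handling in the real utility may differ):

import cv2
import numpy as np

def load_image_sketch(path, shape=None, normalize_type=None, gen_input_ailia=False):
    img = cv2.imread(path)                           # BGR
    img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)      # samples convert back from BGRA
    if shape is not None:
        img = cv2.resize(img, (shape[1], shape[0]))  # shape is (height, width)
    if normalize_type == '255':
        img = img.astype(np.float32) / 255.0
    elif normalize_type == '127.5':
        img = img.astype(np.float32) / 127.5 - 1.0
    if gen_input_ailia:
        img = img.transpose(2, 0, 1)[np.newaxis, :]  # HWC -> NCHW with batch dim
    return img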
Example 19
def recognize_from_image():
    # Input image loop
    for image_path in args.input:
        logger.info(image_path)

        # Prepare input data.
        dataset, image = _prepare_data(args, image_path=image_path)

        # Initialize net.
        net = _initialize_net(args)

        # Inference
        logger.info("Start inference...")
        if args.benchmark:
            logger.info("BENCHMARK mode")
            for i in range(5):
                start = int(round(time.time() * 1000))
                weather, prob = _estimate(dataset, net)
                end = int(round(time.time() * 1000))
                logger.info(f"\tailia processing time {end - start} ms")

        # show result
        weather, prob = _estimate(dataset, net)
        logger.info(f"result : {weather} {prob}")
        filepath = get_savepath(args.savepath, image_path, ext=".png")
        weather_prediction_from_image_utils.save_image(
            _output_text(weather, prob), image, filepath)
        logger.info(f"saved at : {filepath}")

    logger.info("Script finished successfully.")
Example 20
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)
    net.set_input_shape((1, 1, IMAGE_HEIGHT, IMAGE_WIDTH))

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        img = load_img(image_path)
        logger.debug(f'input image shape: {img.shape}')
        (img_lab_orig, img_lab_rs) = preprocess(img)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                out = net.predict({'input.1': img_lab_rs})[0]
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            out = net.predict({'input.1': img_lab_rs})[0]

        out_img = post_process(out, img_lab_orig)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        plt.imsave(savepath, out_img)
    logger.info('Script finished successfully.')
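
post_process is defined elsewhere; colorization pipelines of this kind usually resize the predicted ab channels back to the original resolution, join them with the original L channel, and convert LAB to RGB. A sketch under those assumptions (the real helper may use different libraries or scaling):

import cv2
import numpy as np
from skimage import color

def post_process_sketch(out_ab, img_lab_orig):
    # out_ab: (1, 2, h, w) predicted ab channels; img_lab_orig: (H, W, 3) LAB image.
    H, W = img_lab_orig.shape[:2]
    ab = out_ab[0].transpose(1, 2, 0)            # -> (h, w, 2)
    ab = cv2.resize(ab, (W, H))                  # back to the original resolution
    lab = np.concatenate([img_lab_orig[:, :, :1], ab], axis=2)
    return color.lab2rgb(lab)                    # float RGB in [0, 1], fine for plt.imsave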
Example 21
def recognize_from_image(net, params):
    category = params['category']

    # input image loop
    for image_path in args.input:
        logger.info(image_path)

        # prepare input data
        img = load_image(image_path)
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                pixel_labels = detect_objects(img, net, params['img_size'])
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            pixel_labels = detect_objects(img, net, params['img_size'])

        output_img = Image.fromarray(np.asarray(pixel_labels, dtype=np.uint8))
        # palette = get_palette(len(category))
        palette = list(itertools.chain.from_iterable(category.values()))
        output_img.putpalette(palette)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        output_img.save(savepath)
Example 22
def recognize_from_image(net):
    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.debug(f'input image: {image_path}')
        raw_img = cv2.imread(image_path)
        logger.debug(f'input image shape: {raw_img.shape}')

        img = crop_and_resize(raw_img, WIDTH, HEIGHT, args.arch, args.resize)
        img = preprocess(img, args.arch)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                output, output_exist = net.run(img)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            output, output_exist = net.run(img)

        output = postprocess(output, args.arch)
        out_img = colorize(output)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, out_img)

    logger.info('Script finished successfully.')
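
colorize is defined elsewhere; for lane detection the usual step is an argmax over per-lane probability maps followed by a color lookup, sketched here with assumed shapes and colors:

import numpy as np

LANE_COLORS_SKETCH = [(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255)]

def colorize_sketch(prob_maps):
    # prob_maps assumed to be (num_classes, H, W); class 0 is background.
    labels = np.argmax(prob_maps, axis=0)
    out = np.zeros((*labels.shape, 3), dtype=np.uint8)
    for idx, bgr in enumerate(LANE_COLORS_SKETCH):
        out[labels == idx] = bgr
    return out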
Example 23
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        image = craft_pytorch_utils.load_image(image_path)
        logger.debug(f'input image shape: {image.shape}')
        x, ratio_w, ratio_h = craft_pytorch_utils.pre_process(image)
        net.set_input_shape((1, 3, x.shape[2], x.shape[3]))

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                y, _ = net.predict({'input.1': x})
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            y, _ = net.predict({'input.1': x})

        img = craft_pytorch_utils.post_process(y, image, ratio_w, ratio_h)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, img)
    logger.info('Script finished successfully.')
Example 24
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        src_img = cv2.imread(image_path)
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
        )
        input_data = input_data[np.newaxis, :, :, :]
        net.set_input_shape(input_data.shape)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            preds_ailia = net.predict(input_data)

        # postprocessing
        pred = preds_ailia.reshape((IMAGE_HEIGHT, IMAGE_WIDTH))
        dst = transfer(src_img, pred)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, dst)
    logger.info('Script finished successfully.')
Example 25
def transform_image():
    """Full transormation on a single image loaded from filepath in arguments.
    """
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        image = cv2.imread(image_path)

        if args.face_recognition:
            locator = FaceLocator()
        else:
            locator = None

        logger.info('Start inference...')

        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))

                out_image = process_frame(net, locator, image)

                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')

        else:
            out_image = process_frame(net, locator, image)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, out_image[..., ::-1])
    return True
Example 26
def wavfile_input_recognition():
    if args.beamdecode:
        try:
            from ctcdecode import CTCBeamDecoder
        except ImportError:
            raise ImportError("BeamCTCDecoder requires paddledecoder package.")

        decoder = CTCBeamDecoder(
            LABELS,
            LM_PATH,
            ALPHA,
            BETA,
            CUTOFF_TOP_N,
            CUTOFF_PROB,
            BEAM_WIDTH,
            NUM_PROCESS,
            BRANK_LABEL_INDEX,
        )

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    for soundf_path in args.input:
        logger.info(soundf_path)
        if args.ailia_audio:
            wav, sr = sf.read(soundf_path)
            wav = ailia.audio.resample(wav, sr, SAMPLING_RATE)
        else:
            wav = librosa.load(soundf_path, sr=SAMPLING_RATE)[0]
        spectrogram = create_spectrogram(wav)
        net.set_input_shape(spectrogram[0].shape)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for c in range(5):
                start = int(round(time.time() * 1000))
                preds_ailia, output_length = net.predict(spectrogram)
                end = int(round(time.time() * 1000))
                logger.info("\tailia processing time {} ms".format(end-start))
        else:
            # Deep Speech output: output_probability, output_length
            preds_ailia, output_length = net.predict(spectrogram)

        if args.beamdecode:
            text = beam_ctc_decode(
                torch.from_numpy(preds_ailia),
                torch.from_numpy(output_length),
                decoder,
            )
        else:
            text = decode(preds_ailia[0], output_length)

        savepath = get_savepath(args.savepath, soundf_path, ext='.txt')
        logger.info(f'Results saved at : {savepath}')
        with open(savepath, 'w', encoding='utf-8') as f:
            f.write(text)
        logger.info(f'predict sentence:\n{text}')
    logger.info('Script finished successfully.')
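
decode (the non-beam path) is defined elsewhere; the standard fallback is greedy CTC decoding: take the argmax per frame, collapse repeats, and drop the blank label. A sketch under those assumptions (the label list and blank index are parameters here because the sample's constants are defined elsewhere):

import numpy as np

def greedy_ctc_decode_sketch(probs, output_length, labels, blank_index=0):
    # probs: (T, num_labels) per-frame label probabilities for one utterance.
    best = np.argmax(probs[:int(output_length)], axis=1)
    chars, prev = [], None
    for idx in best:
        # Collapse repeated labels, then drop blanks.
        if idx != prev and idx != blank_index:
            chars.append(labels[idx])
        prev = idx
    return ''.join(chars)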
Example 27
def recognize_from_image(filename, detector, pp_net):
    # prepare input data
    img_0 = load_image(filename)
    logger.debug(f'input image shape: {img_0.shape}')

    img = cv2.cvtColor(img_0, cv2.COLOR_BGRA2RGB)

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            detect_object, seg_masks = detect_objects(img, detector, pp_net)
            end = int(round(time.time() * 1000))
            logger.info(f'\tailia processing time {end - start} ms')
    else:
        detect_object, seg_masks = detect_objects(img, detector, pp_net)

    # plot result
    res_img = plot_results(detect_object,
                           img_0,
                           CATEGORY,
                           segm_masks=seg_masks)
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, res_img)
Example 28
def transfer_to_image():
    # Prepare input data
    preprocess = PreProcess(config, args, face_parser_path,
                            face_alignment_path, face_detector_path)
    source, real_A, mask_A, diff_A, crop_face = _prepare_data(
        args, preprocess, "source")
    _, real_B, mask_B, diff_B, _ = _prepare_data(args, preprocess, "reference")

    # Net initialize
    net = _initialize_net(args)

    # Inference
    logger.info("Start inference...")
    if args.benchmark:
        logger.info("BENCHMARK mode")
        for i in range(5):
            start = int(round(time.time() * 1000))
            out = _transfer(real_A, real_B, mask_A, mask_B, diff_A, diff_B,
                            net)
            end = int(round(time.time() * 1000))
            logger.info(f"\tailia processing time {end - start} ms")
    else:
        out = _transfer(real_A, real_B, mask_A, mask_B, diff_A, diff_B, net)

    # Postprocessing
    postprocess = PostProcess(config)
    image = _postprocessing(out[0], source, crop_face, postprocess)
    savepath = get_savepath(args.savepath, args.input[0])
    image.save(savepath)
    logger.info(f"saved at : {savepath}")
    logger.info("Script finished successfully.")
Example 29
def unwarp_from_image():
    # net initialize
    bm_net = ailia.Net(BM_MODEL_PATH, BM_WEIGHT_PATH, env_id=args.env_id)
    wc_net = ailia.Net(WC_MODEL_PATH, WC_WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        org_img = cv2.imread(image_path)
        img = load_image(
            image_path,
            (WC_IMG_HEIGHT, WC_IMG_WIDTH),
            normalize_type='255',
            gen_input_ailia=True,
        )

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                uwpred = run_inference(wc_net, bm_net, img, org_img)
                end = int(round(time.time() * 1000))
                logger.info("\tailia processing time {} ms".format(end-start))
        else:
            uwpred = run_inference(wc_net, bm_net, img, org_img)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, uwpred * 255)
    logger.info('Script finished successfully.')
Example 30
def recognize_from_image():
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
            normalize_type='255',
            gen_input_ailia=True,
        )

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for i in range(5):
                start = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                end = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {end - start} ms')
        else:
            preds_ailia = net.predict(input_data)

        # postprocessing
        output_img = preds_ailia[0].transpose((1, 2, 0))
        output_img = cv2.cvtColor(output_img, cv2.COLOR_RGB2BGR)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, output_img * 255)
    logger.info('Script finished successfully.')