예제 #1
0
def recognize_from_image():
    """Detect objects in the input image and save the plotted detections."""
    # original image, kept for drawing the results
    org_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
    )

    # same image, preprocessed as network input
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='127.5',
        gen_input_ailia=True,
    )

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = net.predict([input_data])
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = net.predict([input_data])

    # postprocessing
    detections = but.postprocess(preds_ailia)

    # draw every detection onto the original image and save it
    for detection in detections:
        but.plot_detections(org_img, detection, save_image_path=args.savepath)
    print('Script finished successfully.')
예제 #2
0
def recognize_from_image():
    """Run detection on the input image via explicit blob I/O and plot it."""
    # original image, kept for visualization
    org_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
    )

    # preprocessed network input
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='127.5',
        gen_input_ailia=True,
    )

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # compute execution time (explicit blob API instead of predict())
    for _ in range(5):
        t0 = int(round(time.time() * 1000))
        input_blobs = net.get_input_blob_list()
        net.set_input_blob_data(input_data, input_blobs[0])
        net.update()
        preds_ailia = net.get_results()
        t1 = int(round(time.time() * 1000))
        print(f'ailia processing time {t1 - t0} ms')

    # postprocessing
    detections = postprocess(preds_ailia)

    # draw every detection onto the original image and save it
    for detection in detections:
        plot_detections(org_img, detection, save_image_path=args.savepath)
    print('Script finished successfully.')
def recognize_from_image():
    """Run the network on the input image and save the post-processed result."""
    # network input (ImageNet normalization) plus a raw copy for postprocess
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='ImageNet',
        gen_input_ailia=True,
    )
    src_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = net.predict(input_data)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = net.predict(input_data)

    # postprocessing
    res_img = postprocess(src_img, preds_ailia)
    cv2.imwrite(args.savepath, res_img)
    print('Script finished successfully.')
예제 #4
0
def image_style_transfer():
    """Apply the style image to the content image and save the result."""
    # content and style inputs, both normalized to [0, 1]
    input_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='255',
        gen_input_ailia=True,
    )
    src_h, src_w, _ = cv2.imread(args.input).shape
    style_img = load_image(
        args.style,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='255',
        gen_input_ailia=True,
    )

    # encoder / decoder networks
    vgg = ailia.Net(VGG_MODEL_PATH, VGG_WEIGHT_PATH, env_id=args.env_id)
    decoder = ailia.Net(DEC_MODEL_PATH, DEC_WEIGHT_PATH, env_id=args.env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = style_transfer(vgg, decoder, input_img, style_img)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = style_transfer(vgg, decoder, input_img, style_img)

    # CHW RGB -> HWC BGR, restore the source resolution, then save
    res_img = cv2.cvtColor(
        preds_ailia[0].transpose(1, 2, 0), cv2.COLOR_RGB2BGR)
    res_img = cv2.resize(res_img, (src_w, src_h))
    cv2.imwrite(args.savepath, np.clip(res_img * 255 + 0.5, 0, 255))
    print('Script finished successfully.')
예제 #5
0
def recognize_from_image(filename, net, onnx=False):
    """Age-transform one face image with *net* and save the rendered result.

    Args:
        filename: path of the input image.
        net: an ailia network or an ONNX Runtime session.
        onnx: when True, run through ONNX Runtime instead of ailia.
    """
    # face alignment; fall back to the raw file if alignment produced nothing
    aligned = align_face(filename, args, face_alignment_path, face_detector_path)
    if aligned is None:
        path = filename
    else:
        path = os.path.join(ALIGNED_PATH, filename.split('/')[-1])
        aligned.save(path)

    input_img = load_image(
        path,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='255',
        gen_input_ailia=True,
    )

    # resized copy, rescaled from [0, 1] to [-1, 1]
    input_img_resized = load_image(
        path,
        (RESIZE_HEIGHT, RESIZE_WIDTH),
        normalize_type='255',
        gen_input_ailia=True,
    )
    input_img_resized = input_img_resized * 2 - 1

    # one batch entry per requested target age
    input_batch = [
        add_aging_channel(input_img_resized[0], age)
        for age in args.target_age.split(',')
    ]

    # inference
    logger.info('Start inference...')
    if args.benchmark:
        logger.info('BENCHMARK mode')
        for _ in range(5):
            if onnx:
                # onnx runtime prediction
                t0 = int(round(time.time() * 1000))
                result_batch = run_on_batch(input_batch, net, onnx=True)
                t1 = int(round(time.time() * 1000))
                logger.info(f'\tonnx runtime processing time {t1 - t0} ms')
            else:
                # ailia prediction
                t0 = int(round(time.time() * 1000))
                result_batch = run_on_batch(input_batch, net)
                t1 = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {t1 - t0} ms')
    elif onnx:
        result_batch = run_on_batch(input_batch, net, onnx=True)
    else:
        result_batch = run_on_batch(input_batch, net)

    # post processing
    res = post_processing(result_batch, input_img)

    # save results
    savepath = get_savepath(args.savepath, filename)
    logger.info(f'saved at : {savepath}')
    cv2.imwrite(savepath, res)
예제 #6
0
def estimate_from_image():
    """Estimate crowd count per input image and save a heatmap side-by-side."""
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        logger.info(image_path)
        # RGB copy of the original for the side-by-side output
        org_img = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
            normalize_type='None',
        )
        org_img = cv2.cvtColor(org_img, cv2.COLOR_BGR2RGB)
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
            rgb=False,
            normalize_type='None',
            gen_input_ailia=True,
        )

        # inference
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(5):
                t0 = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                t1 = int(round(time.time() * 1000))
                logger.info(f"\tailia processing time {t1 - t0} ms")
        else:
            preds_ailia = net.predict(input_data)

        # estimated crowd count
        et_count = int(np.sum(preds_ailia))

        # density map scaled to [0, 255], rendered as a JET heatmap
        density_map = (255 * preds_ailia / np.max(preds_ailia))[0][0]
        density_map = cv2.resize(density_map, (IMAGE_WIDTH, IMAGE_HEIGHT))
        heatmap = cv2.applyColorMap(
            density_map.astype(np.uint8), cv2.COLORMAP_JET)
        cv2.putText(
            heatmap,
            f'Est Count: {et_count}',
            (40, 440),  # position
            cv2.FONT_HERSHEY_SIMPLEX,  # font
            0.8,  # fontscale
            (255, 255, 255),  # color
            2,  # thickness
        )

        res_img = np.hstack((org_img, heatmap))
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, res_img)
    logger.info('Script finished successfully.')
예제 #7
0
def recognize_from_image():
    """Classify action from a directory of sequential frames.

    Frames are read in numeric filename order, stacked into a sliding
    window of ``args.duration`` frames, and fed to the network repeatedly
    as the window advances through the sequence.
    """
    # prepare input data: sort frame files by the number in their name
    # (a def instead of an assigned lambda — PEP 8 E731; raw string for \D)
    def frame_number(val):
        return int(re.sub(r"\D", "", val))

    sorted_inputs_path = sorted(os.listdir(args.input), key=frame_number)
    input_blob = np.empty((1, 3, args.duration, IMAGE_HEIGHT, IMAGE_WIDTH))
    for i, input_path in enumerate(sorted_inputs_path[0:args.duration]):
        img = load_image(args.input + '/' + input_path,
                         (IMAGE_HEIGHT, IMAGE_WIDTH),
                         normalize_type='None',
                         gen_input_ailia=True)
        input_blob[0, :, i, :, :] = img
    next_input_index = args.duration
    input_frame_size = len(sorted_inputs_path)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape((1, 3, args.duration, IMAGE_HEIGHT, IMAGE_WIDTH))

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for i in range(5):
            start = int(round(time.time() * 1000))
            result = net.predict(input_blob)
            end = int(round(time.time() * 1000))
            print(f'\tailia processing time {end - start} ms')
    else:
        while next_input_index < input_frame_size:
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            result = net.predict(input_blob)

            print_mars_result(result)

            preview_img = cv2.imread(args.input + '/' +
                                     sorted_inputs_path[next_input_index -
                                                        args.duration])
            cv2.imshow('preview', preview_img)

            # shift the temporal window left by one frame
            for i in range(args.duration - 1):
                input_blob[0, :, i, :, :] = input_blob[0, :, i + 1, :, :]

            # append the next frame at the end of the window
            img = load_image(args.input + '/' +
                             sorted_inputs_path[next_input_index],
                             (IMAGE_HEIGHT, IMAGE_WIDTH),
                             normalize_type='None',
                             gen_input_ailia=True)
            input_blob[0, :, args.duration - 1, :, :] = img
            next_input_index += 1

    print('Script finished successfully.')
예제 #8
0
def get_batch():
    """Build one training batch mixing paired and singleton images.

    Roughly half the batch comes as pairs drawn from multi-image folders,
    the rest as one image each from singleton folders.
    """
    pair_count = ((batch_size + 1) // 2) // 2
    # NOTE: sample() call order is preserved so RNG-driven results match
    singles = sample(singleton_folders, k=batch_size - pair_count * 2)
    multiples = sample(multiple_folders, k=pair_count)

    batch = []
    # two images from each multi-image folder
    for folder in multiples:
        for filename in sample(image_paths[folder], k=2):
            batch.append(image_utils.load_image(
                os.path.join(folder, filename), training_params.shape))
    # one image from each singleton folder
    for folder in singles:
        batch.append(image_utils.load_image(
            os.path.join(folder, image_paths[folder][0]),
            training_params.shape))
    return np.array(batch)
예제 #9
0
def prepare_input_data(image_path):
    """Load and preprocess *image_path* for the architecture in args.arch."""
    if args.arch == "vggface2":
        img = load_image(image_path, (IMAGE_HEIGHT, IMAGE_WIDTH),
                         normalize_type='None',
                         gen_input_ailia=False)
        return preprocess_image_vggface2(img)

    # arcface path: BGR load, no ailia input generation
    img = load_image(image_path,
                     image_shape=(IMAGE_HEIGHT, IMAGE_WIDTH),
                     rgb=False,
                     normalize_type='None')
    return preprocess_image_arcface(img)
예제 #10
0
def extract_feature_vec_from_image():
    """Print the 'encode1' feature vector extracted from the input image."""
    # prepare input data
    input_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )
    input_data = prepare_input_data(input_img)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    fe_net = ailia.Net(FE_MODEL_PATH, FE_WEIGHT_PATH, env_id=env_id)
    fe_net.set_input_shape(input_data.shape)

    input_dict = {'data': input_data}

    # inference (the prediction itself is discarded; we read a blob below)
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            _ = fe_net.predict(input_dict)[0]
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        _ = fe_net.predict(input_dict)[0]

    # extract the output of a specific intermediate layer
    idx = fe_net.find_blob_index_by_name('encode1')
    preds_ailia = fe_net.get_blob_data(idx)
    print(preds_ailia.reshape(preds_ailia.shape[0], -1))
    print('Script finished successfully.')
예제 #11
0
    def _data_generator(
            self, image_files: List[PurePath]) -> np.ndarray:
        """Generate a preprocessed image tensor for one batch.

        Args:
            image_files: Paths of the images belonging to this batch.

        Returns:
            4D float array of basenet-preprocessed images. Rows for images
            that failed to load are removed before preprocessing.
            (Fixed: the previous annotation claimed a tuple of two arrays,
            but a single array is returned.)
        """
        #  initialize images tensor once for faster processing
        X = np.empty((len(image_files), *self.target_size, 3))

        invalid_image_idx = []
        for i, image_file in enumerate(image_files):
            # load and randomly augment image
            img = load_image(image_file=image_file,
                             target_size=self.target_size,
                             grayscale=False)

            if img is not None:
                X[i, :] = img

            else:
                # remember both the batch-local and the global position
                invalid_image_idx.append(i)
                self.invalid_image_idx.append(self.counter)

            self.counter += 1

        if invalid_image_idx:
            X = np.delete(X, invalid_image_idx, axis=0)

        # apply basenet specific preprocessing
        # input is 4D numpy array of RGB values within [0, 255]
        X = self.basenet_preprocess(X)

        return X
예제 #12
0
def recognize_from_image():
    """Run the network on the input image, save plots and a confidence grid."""
    # prepare input data
    input_img = cv2.imread(args.input)
    data = load_image(args.input, (IMAGE_HEIGHT, IMAGE_WIDTH),
                      normalize_type='255',
                      gen_input_ailia=True)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # compute execution time
    for _ in range(5):
        t0 = int(round(time.time() * 1000))
        preds_ailia = net.predict(data)[0]
        t1 = int(round(time.time() * 1000))
        print(f'ailia processing time {t1 - t0} ms')

    visualize_plots(input_img, preds_ailia)
    cv2.imwrite(args.savepath, input_img)

    # confidence map tiled into a grid of `cols` columns
    channels = preds_ailia.shape[0]
    cols = 8
    rows = int((channels + cols - 1) / cols)
    plot_images('confidence', preds_ailia, tile_shape=(rows, cols))
    print('Script finished successfully.')
예제 #13
0
def recognize_from_image():
    """Estimate poses for every input image and save the drawn results."""
    # net initialize
    pose = ailia.PoseEstimator(MODEL_PATH,
                               WEIGHT_PATH,
                               env_id=args.env_id,
                               algorithm=ALGORITHM)

    # input image loop
    for image_path in args.input:
        # prepare input data (BGRA expected by the estimator)
        logger.info(image_path)
        src_img = cv2.imread(image_path)
        input_image = load_image(image_path, (IMAGE_HEIGHT, IMAGE_WIDTH),
                                 normalize_type='None')
        input_data = cv2.cvtColor(input_image, cv2.COLOR_RGB2BGRA)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(5):
                t0 = int(round(time.time() * 1000))
                _ = pose.compute(input_data)
                t1 = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {t1 - t0} ms')
        else:
            _ = pose.compute(input_data)

        # post-processing: draw each detected person onto the source image
        count = pose.get_object_count()
        logger.info(f'person_count={count}')
        display_result(src_img, pose)
        cv2.imwrite(get_savepath(args.savepath, image_path), src_img)
    logger.info('Script finished successfully.')
예제 #14
0
def recognize_from_image():
    """Classify the input image and print the top predicted categories."""
    # prepare input data (BGRA for the ailia classifier)
    input_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )
    input_data = cv2.cvtColor(input_img, cv2.COLOR_BGR2BGRA)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    classifier = ailia.Classifier(
        MODEL_PATH,
        WEIGHT_PATH,
        env_id=env_id,
        format=ailia.NETWORK_IMAGE_FORMAT_RGB,
        range=ailia.NETWORK_IMAGE_RANGE_U_FP32,
    )

    # compute execution time
    for _ in range(5):
        t0 = int(round(time.time() * 1000))
        classifier.compute(input_data, MAX_CLASS_COUNT)
        count = classifier.get_class_count()
        t1 = int(round(time.time() * 1000))
        print(f'ailia processing time {t1 - t0} ms')

    # print each of the top categories with its probability
    for idx in range(count):
        print(f'+ idx={idx}')
        info = classifier.get_class(idx)
        print(
            f'  category={info.category}'
            f'[ {inceptionv3_labels.imagenet_category[info.category]} ]'
        )
        print(f'  prob={info.prob}')
    print('Script finished successfully.')
예제 #15
0
def recognize_from_image():
    """Process each input image with the network and save the transfer result."""
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data (add a batch axis, then match the net to it)
        logger.info(image_path)
        src_img = cv2.imread(image_path)
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
        )
        input_data = input_data[np.newaxis, :, :, :]
        net.set_input_shape(input_data.shape)

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(5):
                t0 = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                t1 = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {t1 - t0} ms')
        else:
            preds_ailia = net.predict(input_data)

        # postprocessing: reshape to a 2D map and transfer onto the source
        pred = preds_ailia.reshape((IMAGE_HEIGHT, IMAGE_WIDTH))
        dst = transfer(src_img, pred)
        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, dst)
    logger.info('Script finished successfully.')
예제 #16
0
def recognize_from_image():
    """Run the network on the (optionally noised) input and save the output."""
    # prepare input data
    img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )

    # optionally corrupt the input before inference
    if args.add_noise:
        img = add_noise(img)

    # HWC [0, 255] -> NCHW [0, 1]
    img = img / 255.0
    input_data = img.transpose(2, 0, 1)[np.newaxis, :, :, :]

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # compute execution time
    for _ in range(5):
        t0 = int(round(time.time() * 1000))
        preds_ailia = net.predict(input_data)
        t1 = int(round(time.time() * 1000))
        print(f'ailia processing time {t1 - t0} ms')

    # postprocessing: CHW [0, 1] -> HWC [0, 255], clip, RGB -> BGR, save
    output_img = preds_ailia[0].transpose(1, 2, 0) * 255
    output_img = np.clip(output_img, 0, 255)
    output_img = cv2.cvtColor(output_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(args.savepath, output_img)
    print('Script finished successfully.')
예제 #17
0
def recognize_from_image():
    """Classify the input image and print the top results."""
    # prepare input data as BGRA uint8 for the ailia classifier
    input_data = load_image(args.input, (IMAGE_HEIGHT, IMAGE_WIDTH),
                            normalize_type='None',
                            gen_input_ailia=False)
    input_data = cv2.cvtColor(
        input_data.astype(np.float32), cv2.COLOR_RGB2BGRA,
    ).astype(np.uint8)

    # net initialize
    classifier = ailia.Classifier(
        MODEL_PATH,
        WEIGHT_PATH,
        env_id=args.env_id,
        format=ailia.NETWORK_IMAGE_FORMAT_RGB,
        range=ailia.NETWORK_IMAGE_RANGE_S_FP32,
    )

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            classifier.compute(input_data, MAX_CLASS_COUNT)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        classifier.compute(input_data, MAX_CLASS_COUNT)

    # show results
    print_results(classifier, efficientnet_labels.imagenet_category)

    print('Script finished successfully.')
예제 #18
0
def recognize_tag_from_image():
    """Predict tags for the input image and print those above the threshold."""
    # prepare input data
    input_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )
    input_data = prepare_input_data(input_img)

    # net initialize
    tag_net = ailia.Net(TAG_MODEL_PATH, TAG_WEIGHT_PATH, env_id=args.env_id)
    tag_net.set_input_shape(input_data.shape)

    # load the tag vocabulary
    if check_file_existance(TAG_PATH):
        tags = np.array(json.loads(open(TAG_PATH, 'r').read()))
        assert len(tags) == 1539

    input_dict = {'data': input_data}

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = tag_net.predict(input_dict)[0]
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = tag_net.predict(input_dict)[0]

    # rank tags by score and print those above the threshold
    prob = preds_ailia.reshape(preds_ailia.shape[0], -1)
    preds = estimate_top_tags(prob, tags, 512)  # TODO how to decide n_tag?
    pprint(apply_threshold(preds, THRESHOLD))
    print('Script finished successfully.')
예제 #19
0
def unwarp_from_image():
    """Run the two-stage (wc_net then bm_net) unwarp pipeline and save it."""
    org_img = cv2.imread(args.input)
    img = load_image(args.input, (WC_IMG_HEIGHT, WC_IMG_WIDTH),
                     normalize_type='255',
                     gen_input_ailia=True)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    bm_net = ailia.Net(BM_MODEL_PATH, BM_WEIGHT_PATH, env_id=env_id)
    wc_net = ailia.Net(WC_MODEL_PATH, WC_WEIGHT_PATH, env_id=env_id)

    # compute execution time
    for _ in range(5):
        t0 = int(round(time.time() * 1000))

        # stage 1: wc_net output, clipped to [0, 1], CHW -> HWC
        wc_output = wc_net.predict(img)[0]
        pred_wc = np.clip(wc_output, 0, 1.0).transpose(1, 2, 0)
        # stage 2: resize for bm_net, HWC -> NCHW, predict, then unwarp
        bm_input = cv2.resize(
            pred_wc, (BM_IMG_WIDTH, BM_IMG_HEIGHT)).transpose(2, 0, 1)
        bm_input = np.expand_dims(bm_input, 0)
        outputs_bm = bm_net.predict(bm_input)[0]
        uwpred = unwarp(org_img, outputs_bm)  # This is not on GPU!

        t1 = int(round(time.time() * 1000))
        print("ailia processing time {} ms".format(t1 - t0))

    cv2.imwrite(args.savepath, uwpred * 255)
    print('Script finished successfully.')
예제 #20
0
def unwarp_from_image():
    """Unwarp the input image through wc_net/bm_net and save the result."""
    org_img = cv2.imread(args.input)
    img = load_image(args.input, (WC_IMG_HEIGHT, WC_IMG_WIDTH),
                     normalize_type='255',
                     gen_input_ailia=True)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    bm_net = ailia.Net(BM_MODEL_PATH, BM_WEIGHT_PATH, env_id=env_id)
    wc_net = ailia.Net(WC_MODEL_PATH, WC_WEIGHT_PATH, env_id=env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            uwpred = run_inference(wc_net, bm_net, img, org_img)
            t1 = int(round(time.time() * 1000))
            print("\tailia processing time {} ms".format(t1 - t0))
    else:
        uwpred = run_inference(wc_net, bm_net, img, org_img)

    cv2.imwrite(args.savepath, uwpred * 255)
    print('Script finished successfully.')
예제 #21
0
def recognize_from_image():
    """Run inference on the equalized input image and save the plotted result."""
    # prepare input data
    org_img = cv2.imread(args.input)
    img = load_image(args.input, (IMAGE_HEIGHT, IMAGE_WIDTH),
                     rgb=False,
                     normalize_type='None')
    img = cv2.equalizeHist(img)
    if platform.system() == 'Darwin':  # For Mac OS (FP16)
        data = img[np.newaxis, np.newaxis, :, :] / 255.0 - 0.5
    else:
        data = img[np.newaxis, np.newaxis, :, :] / 127.5 - 1.0
    # duplicate the input along the batch axis, then reshape to NHWC
    eyeI = np.concatenate((data, data), axis=0)
    eyeI = eyeI.reshape(2, IMAGE_HEIGHT, IMAGE_WIDTH, 1)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # compute execution time
    for _ in range(5):
        t0 = int(round(time.time() * 1000))
        preds_ailia = net.predict(eyeI)
        # read the named output blob rather than the default prediction
        preds_ailia = net.get_blob_data(
            net.find_blob_index_by_name(OUTPUT_BLOB_NAME))
        t1 = int(round(time.time() * 1000))
        print(f'ailia processing time {t1 - t0} ms')

    # postprocessing
    img = plot_on_image(org_img, preds_ailia)
    cv2.imwrite(args.savepath, img)
    print('Script finished successfully.')
예제 #22
0
def recognize_from_image():
    """Predict a 2D map for the input image, transfer it, and save the result."""
    # prepare input data
    src_img = cv2.imread(args.input)
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
    )
    input_data = input_data[np.newaxis, :, :, :]

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape(input_data.shape)

    # compute execution time
    for i in range(5):
        start = int(round(time.time() * 1000))
        preds_ailia = net.predict(input_data)
        end = int(round(time.time() * 1000))
        print(f'ailia processing time {end - start} ms')

    # postprocessing
    pred = preds_ailia.reshape((IMAGE_HEIGHT, IMAGE_WIDTH))
    dst = transfer(src_img, pred)
    cv2.imwrite(args.savepath, dst)
    # consistency fix: every sibling script in this file reports completion
    print('Script finished successfully.')
예제 #23
0
def recognize_from_image():
    """Transform the input image with the network and save the output image."""
    # prepare input data, normalized to [0, 1]
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='255',
        gen_input_ailia=True,
    )

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = net.predict(input_data)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = net.predict(input_data)

    # postprocessing: CHW RGB -> HWC BGR, rescale to [0, 255] on write
    output_img = preds_ailia[0].transpose((1, 2, 0))
    output_img = cv2.cvtColor(output_img, cv2.COLOR_RGB2BGR)
    cv2.imwrite(args.savepath, output_img * 255)
    print('Script finished successfully.')
예제 #24
0
def unwarp_from_image():
    """Unwarp every input image with wc_net/bm_net and save the results."""
    # net initialize
    bm_net = ailia.Net(BM_MODEL_PATH, BM_WEIGHT_PATH, env_id=args.env_id)
    wc_net = ailia.Net(WC_MODEL_PATH, WC_WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data
        logger.info(image_path)
        org_img = cv2.imread(image_path)
        img = load_image(
            image_path,
            (WC_IMG_HEIGHT, WC_IMG_WIDTH),
            normalize_type='255',
            gen_input_ailia=True,
        )

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(5):
                t0 = int(round(time.time() * 1000))
                uwpred = run_inference(wc_net, bm_net, img, org_img)
                t1 = int(round(time.time() * 1000))
                logger.info("\tailia processing time {} ms".format(t1 - t0))
        else:
            uwpred = run_inference(wc_net, bm_net, img, org_img)

        savepath = get_savepath(args.savepath, image_path)
        logger.info(f'saved at : {savepath}')
        cv2.imwrite(savepath, uwpred * 255)
    logger.info('Script finished successfully.')
예제 #25
0
def recognize_from_image():
    """Classify each input image and print the top categories."""
    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # input image loop
    for image_path in args.input:
        # prepare input data (ImageNet normalization)
        logger.info(image_path)
        input_data = load_image(
            image_path,
            (IMAGE_HEIGHT, IMAGE_WIDTH),
            normalize_type='ImageNet',
            gen_input_ailia=True,
        )

        # inference
        logger.info('Start inference...')
        if args.benchmark:
            logger.info('BENCHMARK mode')
            for _ in range(args.benchmark_count):
                t0 = int(round(time.time() * 1000))
                preds_ailia = net.predict(input_data)
                t1 = int(round(time.time() * 1000))
                logger.info(f'\tailia processing time {t1 - t0} ms')
        else:
            preds_ailia = net.predict(input_data)

        print_results(preds_ailia, mobilenetv3_labels.imagenet_category)
    logger.info('Script finished successfully.')
예제 #26
0
def recognize_from_image():
    """Predict a 2D map for the input image, transfer it, and save the result."""
    # prepare input data (add a batch axis, then match the net to it)
    src_img = cv2.imread(args.input)
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
    )
    input_data = input_data[np.newaxis, :, :, :]

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)
    net.set_input_shape(input_data.shape)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = net.predict(input_data)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = net.predict(input_data)

    # postprocessing: reshape to a 2D map and transfer onto the source image
    pred = preds_ailia.reshape((IMAGE_HEIGHT, IMAGE_WIDTH))
    dst = transfer(src_img, pred)
    cv2.imwrite(args.savepath, dst)
    print('Script finished successfully.')
예제 #27
0
def recognize_from_image():
    """Run the network on the input and save the (optionally smoothed) output."""
    # prepare input data
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        gen_input_ailia=True,
    )

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            t0 = int(round(time.time() * 1000))
            preds_ailia = net.predict(input_data)
            t1 = int(round(time.time() * 1000))
            print(f'\tailia processing time {t1 - t0} ms')
    else:
        preds_ailia = net.predict(input_data)

    # optional output smoothing before saving
    if args.smooth:
        preds_ailia = smooth_output(preds_ailia)

    save_pred(preds_ailia, args.savepath, IMAGE_HEIGHT, IMAGE_WIDTH)
    print('Script finished successfully.')
예제 #28
0
def recognize_from_image():
    """Predict on one image and save the rendered matplotlib figure.

    Loads `args.input` as BGR, takes the first element of the net's
    prediction, renders it with `gen_img_from_predsailia`, and saves the
    figure to `args.savepath`.
    """
    # prepare input data (BGR order, ailia input layout)
    img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        rgb=False,
        gen_input_ailia=True,
    )

    # net initialize
    net = ailia.Net(MODEL_PATH, WEIGHT_PATH, env_id=args.env_id)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            begin = int(round(time.time() * 1000))
            preds_ailia = net.predict(img)[0]
            finish = int(round(time.time() * 1000))
            print(f'\tailia processing time {finish - begin} ms')
    else:
        preds_ailia = net.predict(img)[0]

    # postprocess: render the prediction and persist the figure
    figure = gen_img_from_predsailia(img, preds_ailia)
    figure.savefig(args.savepath)
    print('Script finished successfully.')
예제 #29
0
def estimate_from_image():
    """Estimate a depth/disparity map for one image and save a magma plot.

    Runs the encoder then decoder nets on `args.input` (5 timed rounds in
    benchmark mode), resizes the last prediction back to the original image
    size via `result_plot`, and saves it to `args.savepath`.
    """
    # prepare input data; remember the source resolution for upscaling later
    org_height, org_width, _ = cv2.imread(args.input).shape
    input_data = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        gen_input_ailia=True,
    )

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    enc_net = ailia.Net(ENC_MODEL_PATH, ENC_WEIGHT_PATH, env_id=env_id)
    dec_net = ailia.Net(DEC_MODEL_PATH, DEC_WEIGHT_PATH, env_id=env_id)

    def _run_once():
        # encoder features feed straight into the decoder
        features = enc_net.predict([input_data])
        return dec_net.predict(features)

    # inference
    print('Start inference...')
    if args.benchmark:
        print('BENCHMARK mode')
        for _ in range(5):
            start = int(round(time.time() * 1000))
            preds_ailia = _run_once()
            end = int(round(time.time() * 1000))
            print(f'\tailia processing time {end - start} ms')
    else:
        preds_ailia = _run_once()

    # postprocessing: plot the final (highest-resolution) prediction
    disp_resized, vmax = result_plot(preds_ailia[-1], org_width, org_height)
    plt.imsave(args.savepath, disp_resized, cmap='magma', vmax=vmax)
    print('Script finished successfully.')
예제 #30
0
def recognize_from_image():
    """Detect human poses on one image, draw them, and save the result.

    Feeds a BGRA version of `args.input` to the ailia PoseEstimator
    (5 timed rounds in benchmark mode), then overlays the detected
    keypoints onto the original frame and writes it to `args.savepath`.
    """
    # prepare input data: keep the source frame for drawing, convert a
    # resized RGB copy to BGRA for the estimator
    src_img = cv2.imread(args.input)
    rgb_img = load_image(
        args.input,
        (IMAGE_HEIGHT, IMAGE_WIDTH),
        normalize_type='None',
    )
    input_data = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2BGRA)

    # net initialize
    env_id = ailia.get_gpu_environment_id()
    print(f'env_id: {env_id}')
    pose = ailia.PoseEstimator(
        MODEL_PATH, WEIGHT_PATH, env_id=env_id, algorithm=ALGORITHM
    )

    # inference
    print('Start inference...')
    if not args.benchmark:
        _ = pose.compute(input_data)
    else:
        print('BENCHMARK mode')
        for _i in range(5):
            start = int(round(time.time() * 1000))
            _ = pose.compute(input_data)
            end = int(round(time.time() * 1000))
            print(f'\tailia processing time {end - start} ms')

    # postprocessing: results live inside the estimator object
    count = pose.get_object_count()
    print(f'person_count={count}')
    display_result(src_img, pose)
    cv2.imwrite(args.savepath, src_img)
    print('Script finished successfully.')
예제 #31
0
def check_scipy():
    """Assert that SciPy >= 0.16.0 is installed.

    Bug fix: the original compared only the minor version number
    (``scipy.__version__.split('.')[1]``), so every SciPy release with
    major version >= 1 (e.g. 1.5.0) was wrongly rejected. Compare the
    (major, minor) tuple instead.

    Raises:
        AssertionError: if the installed SciPy is older than 0.16.0.
    """
    import scipy
    # take only the numeric major/minor parts; ignores patch/rc suffixes
    major, minor = (int(part) for part in scipy.__version__.split('.')[:2])
    assert (major, minor) >= (0, 16), "You must install scipy >=0.16.0"
check_scipy()
# Build the TF1 graph and load the pretrained SqueezeNet feature extractor.
from squeezenet import SqueezeNet
import tensorflow as tf 
# start from a clean default graph so repeated runs don't accumulate ops
tf.reset_default_graph()
sess = get_session()
SAVE_PATH = 'datasets/squeezenet.ckpt'
print(SAVE_PATH)
# NOTE(review): the checkpoint-existence check is commented out — confirm the
# checkpoint file has been downloaded before running.
#if not os.path.exists(SAVE_PATH):
    #raise ValueError("You need to download SqueezeNet!")
model = SqueezeNet(save_path=SAVE_PATH, sess=sess)
# load test data: preprocess and add a leading batch axis via [None]
content_img_test = preprocess_image(load_image('tubingen.jpg',size=192))[None]
style_img_test = preprocess_image(load_image('starry_night.jpg',size=192))[None]
# reference values used to sanity-check the loss implementations
answers = np.load('style-transfer-checks.npz')

def content_loss(content_weight, content_curr, content_orig):
    """Weighted sum of squared differences between current and target content features."""
    squared_diff = tf.square(content_curr - content_orig)
    return content_weight * tf.reduce_sum(squared_diff)

def gram_matrix(features, normalize=True):
    """Compute the (batched) Gram matrix of a feature map.

    Inputs: the shape of features is (1, H, W, C).
    Returns a (1, C, C) tensor of channel correlations; when *normalize*
    is true the result is divided by the total element count H * W * C
    (times batch).

    Bug fix: the original computed ``output`` but never returned it, so
    callers (e.g. the style-target loop) always received None.
    """
    # move channels ahead of the spatial dims: (1, C, H, W)
    features = tf.transpose(features, [0, 3, 1, 2])
    shape = tf.shape(features)
    # flatten spatial dims: (1, C, H*W)
    features = tf.reshape(features, (shape[0], shape[1], -1))
    transpose_features = tf.transpose(features, [0, 2, 1])
    output = tf.matmul(features, transpose_features)
    if normalize:
        output = tf.div(output, tf.cast(shape[0] * shape[1] * shape[2] * shape[3], tf.float32))
    return output
예제 #32
0
def style_transfer(content_img,style_img,content_size,style_size,content_layer,style_layers,
					content_weight,style_weights,tv_weight,init_random=False):
	"""Run iterative neural style transfer and display the results.

	Args:
		content_img: path of the content image file.
		style_img: path of the style image file.
		content_size: resize target for the content image.
		style_size: resize target for the style image.
		content_layer: index of the feature layer used for the content loss.
		style_layers: indices of the feature layers used for the style loss.
		content_weight: scalar weight of the content loss term.
		style_weights: per-layer weights of the style loss terms.
		tv_weight: weight of the total-variation regularizer.
		init_random: if True, start from uniform noise instead of the
			content image.

	Side effects: adds variables/ops to the global TF1 graph, runs the
	optimization in the module-level `sess`, and shows matplotlib figures.
	"""
	content_pre_img = preprocess_image(load_image(content_img,size=content_size))
	feats = model.extract_features(model.image)  #extract features of every layer from the input image
	# target content activations for the fixed content image
	content_targets = sess.run(feats[content_layer],{model.image:content_pre_img[None]})
	style_pre_img = preprocess_image(load_image(style_img,size = style_size))
	style_feats = [feats[idx] for idx in style_layers]
	# convert each style feature map to its Gram matrix before evaluating
	style_target=[] 
	for style_feat_var in style_feats:
		style_target.append(gram_matrix(style_feat_var))
	style_targets = sess.run(style_target,{model.image:style_pre_img[None]})

	if init_random:
		# optimize from scratch: uniform noise in [0, 1)
		img_var = tf.Variable(tf.random_uniform(content_pre_img[None].shape,0,1),name="image")
	else:
		img_var = tf.Variable(content_pre_img[None], name="image")
	# total loss = content + style + total-variation terms
	feat = model.extract_features(img_var)
	conloss = content_loss(content_weight,feat[content_layer],content_targets)
	styloss = style_loss(style_weights,feat,style_layers,style_targets)
	tvloss = TV_loss(img_var,tv_weight)
	loss = conloss+styloss+tvloss
	
	# optimization schedule: drop the learning rate near the end of the run
	initial_lr = 3.0
	decayed_lr = 0.1 
	decayed_lr_at = 180
	max_iters = 200 

	lr_var = tf.Variable(initial_lr,name="lr")
	# Create train_op that updates the generated image when run
	with tf.variable_scope("optimizer") as opt_scope:
		train_op = tf.train.AdamOptimizer(lr_var).minimize(loss,var_list=[img_var])
	# Initialize the generated image and optimization variables
	opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,scope=opt_scope.name)
	sess.run(tf.variables_initializer([lr_var,img_var]+opt_vars))
	# Create an op that will clamp the image values when run
	clamp_image = tf.assign(img_var,tf.clip_by_value(img_var,-1.5,1.5))
	
	# show the two source images side by side before optimizing
	f,s = plt.subplots(1,2)
	s[0].axis('off')
	s[1].axis('off')
	s[0].set_title('content source img')
	s[1].set_title('style source img')
	s[0].imshow(deprocess_image(content_pre_img))
	s[1].imshow(deprocess_image(style_pre_img))
	plt.show()
	plt.figure()

	for i in range(max_iters):
		# take one optimization step to update the generated image
		sess.run(train_op)
		if i < decayed_lr_at:
			# keep pixel values bounded during the high-learning-rate phase
			sess.run(clamp_image)
		if i == decayed_lr_at:
			sess.run(tf.assign(lr_var,decayed_lr))
		if i % 100 ==0:
			print('Iteration:{}'.format(i))
			img = sess.run(img_var)
			plt.imshow(deprocess_image(img[0],rescale=True))
			plt.axis('off')
			plt.show()
	# show the final stylized image
	print('Iteration:{}'.format(i))
	img = sess.run(img_var)
	plt.imshow(deprocess_image(img[0],rescale=True))
	plt.axis('off')
	plt.show()
예제 #33
0
 def __init__(self):
     self.fontmap = image_utils.load_image("res/textures", "font.png")
     self.fontmap.set_colorkey((0, 0, 0))
     self.defaultMask = self.fontmap.get_masks()