Code Example #1
File: ilsvrc_2012.py  Project: ytfksw/blueoil
    def __getitem__(self, i):
        filename = self.files[i]

        image = load_image(filename)

        label = data_processor.binarize(self.annotations[i], self.num_classes)
        label = np.reshape(label, (self.num_classes))
        return (image, label)
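
data_processor.binarize is not shown in these excerpts; judging from the call above it turns the annotation into a fixed-length class vector, which is then reshaped to (num_classes,). A minimal, hypothetical NumPy stand-in illustrating that assumed behaviour:

import numpy as np

def binarize(labels, num_classes):
    # Hypothetical stand-in for data_processor.binarize: mark the given class
    # indices in a multi-hot vector of length num_classes.
    vec = np.zeros(num_classes, dtype=np.float32)
    vec[np.asarray(labels, dtype=np.int64)] = 1.0
    return vec

label = binarize([2, 5], num_classes=8)
label = np.reshape(label, (8,))  # same shape normalization as in the excerpt
print(label)  # [0. 0. 1. 0. 0. 1. 0. 0.]
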
Code Example #2
File: mscoco.py  Project: ytfksw/blueoil
    def __getitem__(self, i):
        image_id = self._image_ids[i]
        image_file = self._image_file_from_image_id(image_id)
        image = load_image(image_file)

        label = self._label_from_image_id(image_id)

        return (image, label)
Code Example #3
    def __getitem__(self, i, type=None):
        target_file = self.files[i]

        image = load_image(target_file)
        label = self.get_label(target_file)

        label = data_processor.binarize(label, self.num_classes)
        label = np.reshape(label, (self.num_classes))
        return (image, label)
Code Example #4
File: mscoco.py  Project: ytfksw/blueoil
    def __getitem__(self, i):
        target_file = self.files[i]
        image = load_image(target_file)

        gt_boxes = self.annotations[i]
        gt_boxes = np.array(gt_boxes)
        gt_boxes = self._fill_dummy_boxes(gt_boxes)

        return (image, gt_boxes)
Code Example #5
    def __getitem__(self, i, type=None):
        target_file = self.files[i]
        image = load_image(target_file)

        gt_boxes = self.annotations[i]
        gt_boxes = np.array(gt_boxes)
        gt_boxes = gt_boxes.copy()  # is it really needed?
        gt_boxes = self._fill_dummy_boxes(gt_boxes)

        return (image, gt_boxes)
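
_fill_dummy_boxes itself is not included in these excerpts. For batching, object-detection datasets usually pad every sample to a fixed number of boxes, so a hypothetical padder along those lines (padding with a dummy class id of -1 is an assumption, not the project's confirmed behaviour) could look like:

import numpy as np

def fill_dummy_boxes(gt_boxes, num_max_boxes=10, dummy_class_id=-1):
    # Hypothetical stand-in for _fill_dummy_boxes: pad the (N, 5) box array
    # with dummy rows so every sample has exactly num_max_boxes rows.
    gt_boxes = np.asarray(gt_boxes, dtype=np.float32).reshape(-1, 5)
    num_missing = num_max_boxes - len(gt_boxes)
    if num_missing <= 0:
        return gt_boxes[:num_max_boxes]
    dummies = np.zeros((num_missing, 5), dtype=np.float32)
    dummies[:, 4] = dummy_class_id
    return np.concatenate([gt_boxes, dummies], axis=0)

boxes = [[10, 20, 30, 40, 1], [50, 60, 70, 80, 2]]
print(fill_dummy_boxes(boxes).shape)  # (10, 5)
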
Code Example #6
File: open_images_v4.py  Project: 2429581027/blueoil
    def __getitem__(self, i, type=None):
        files, labels = self.files_and_annotations

        filename = files[i]

        image = load_image(filename)

        label = data_processor.binarize(labels[i], self.num_classes)
        label = np.reshape(label, (self.num_classes))
        return (image, label)
Code Example #7
    def __getitem__(self, i, type=None):
        target_file = os.path.join(self.img_dir, self.paths[i])

        image = load_image(target_file)

        gt_boxes = self.bboxs[i]
        gt_boxes = np.array(gt_boxes)
        gt_boxes = gt_boxes.copy()  # is it really needed?
        gt_boxes = self._fill_dummy_boxes(gt_boxes)

        return (image, gt_boxes)
Code Example #8
    def __getitem__(self, i, type=None):
        image_file_path = self.paths[i]

        image = load_image(image_file_path)

        gt_boxes = self.bboxs[i]
        gt_boxes = np.array(gt_boxes)
        gt_boxes = gt_boxes.copy()  # is it really needed?
        gt_boxes = self._fill_dummy_boxes(gt_boxes)

        return image, gt_boxes
Code Example #9
File: ytfaces.py  Project: ytfksw/blueoil
    def __getitem__(self, item):
        """Get an item given index.

        Args:
            item: int, index.

        Returns:
            image: a numpy array of shape (height, width, 3).
            joints: a numpy array of shape (68, 3), which has coordinates in image.

        """

        return load_image(self.files[item]), self.joints_list[item]
Code Example #10
def _get_images(filenames, pre_processor, data_format):
    images = []
    raw_images = []

    for filename in filenames:
        if filename == DUMMY_FILENAME:
            raw_image = np.zeros((64, 64, 3), dtype=np.uint8)
        else:
            raw_image = load_image(filename)

        image = pre_processor(image=raw_image)['image']
        if data_format == 'NCHW':
            image = np.transpose(image, [2, 0, 1])

        images.append(image)
        raw_images.append(raw_image)

    return np.array(images), np.array(raw_images)
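
The pre_processor passed to _get_images is assumed here to be a callable taking an image= keyword argument and returning a dict with an 'image' key; its real implementation lives elsewhere in the project. A self-contained sketch of that interface, including the HWC-to-CHW transpose applied for the 'NCHW' data format:

import numpy as np

def dummy_pre_processor(image):
    # Illustrative pre-processor matching the call pattern above:
    # scale pixel values to [0, 1] and return a dict with an 'image' key.
    return {'image': image.astype(np.float32) / 255.0}

raw_image = np.zeros((64, 64, 3), dtype=np.uint8)       # placeholder HWC image
image = dummy_pre_processor(image=raw_image)['image']
image_nchw = np.transpose(image, [2, 0, 1])             # HWC -> CHW
print(image.shape, image_nchw.shape)  # (64, 64, 3) (3, 64, 64)
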
Code Example #11
    def __getitem__(self, item):
        """
        Get an item given index.
        Args:
            item: int, index.

        Returns:
            cropped_image: a numpy array of shape (height, width, 3).
            joints: a numpy array of shape (17, 3), which has local coordinates in cropped_image.
        """
        full_image = load_image(self.files[item])
        box = self.box_list[item]
        joints = self.joints_list[item]

        cropped_image, joints = self.crop_from_full_image(
            full_image, box, joints)

        return cropped_image, joints
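
crop_from_full_image is not part of the excerpt. A hypothetical version, assuming the box is given as pixel coordinates (x1, y1, x2, y2) and that the first two joint columns are x and y, would crop the image and shift the joints into the cropped frame:

import numpy as np

def crop_from_full_image(full_image, box, joints):
    # Hypothetical crop: slice the image by the box and express the joint
    # (x, y) coordinates relative to the crop's top-left corner.
    x1, y1, x2, y2 = [int(v) for v in box]
    cropped = full_image[y1:y2, x1:x2]
    local_joints = joints.astype(np.float32).copy()
    local_joints[:, 0] -= x1
    local_joints[:, 1] -= y1
    return cropped, local_joints

full_image = np.zeros((240, 320, 3), dtype=np.uint8)
joints = np.tile(np.array([[160.0, 120.0, 1.0]]), (17, 1))  # (17, 3)
cropped, local = crop_from_full_image(full_image, (100, 80, 260, 200), joints)
print(cropped.shape, local[0])  # (120, 160, 3) [60. 40. 1.]
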
Code Example #12
File: open_images_v4.py  Project: 2429581027/blueoil
    def __getitem__(self, i, type=None):
        files, gt_boxes_list = self.files_and_annotations
        target_file = files[i]
        gt_boxes = gt_boxes_list[i]

        image = load_image(target_file)
        height = image.shape[0]
        width = image.shape[1]

        gt_boxes = np.array(gt_boxes)

        # Change box coordinate from [0, 1] to [0, image size].
        gt_boxes = np.stack([
            gt_boxes[:, 0] * width,
            gt_boxes[:, 1] * height,
            gt_boxes[:, 2] * width,
            gt_boxes[:, 3] * height,
            gt_boxes[:, 4],
        ], axis=1)

        gt_boxes = self._fill_dummy_boxes(gt_boxes)

        return (image, gt_boxes)
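
The coordinate conversion above can be reproduced in isolation. A small self-contained check with made-up normalized boxes (the multiplication pattern implies columns of x, y, x, y followed by a class id):

import numpy as np

gt_boxes = np.array([
    [0.10, 0.20, 0.50, 0.80, 3],
    [0.00, 0.00, 1.00, 1.00, 7],
])  # normalized [0, 1] coordinates plus class id
height, width = 480, 640

# Change box coordinates from [0, 1] to [0, image size], as in the excerpt.
gt_boxes_px = np.stack([
    gt_boxes[:, 0] * width,
    gt_boxes[:, 1] * height,
    gt_boxes[:, 2] * width,
    gt_boxes[:, 3] * height,
    gt_boxes[:, 4],
], axis=1)
print(gt_boxes_px)
# [[ 64.  96. 320. 384.   3.]
#  [  0.   0. 640. 480.   7.]]
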
Code Example #13
def _image():
    image = load_image("unit/fixtures/sample_images/cat.jpg")

    return image
Code Example #14
def run_prediction(input_image, model, config_file, trial=1):
    if not input_image or not model or not config_file:
        logger.error('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # load the image
    image_data = load_image(input_image)
    raw_image = image_data

    # initialize Network
    nn = _init(model, config)

    # pre process for image
    image_data, bench_pre = _timerfunc(
        _pre_process, (image_data, config.PRE_PROCESSOR, config.DATA_FORMAT),
        trial)

    # add the batch dimension
    image_data = np.expand_dims(image_data, axis=0)

    # run the model to inference
    output, bench_inference = _timerfunc(_run, (nn, image_data), trial)

    logger.info('Output: (before post process)\n{}'.format(output))

    # pre process for output
    output, bench_post = _timerfunc(_post_process,
                                    (output, config.POST_PROCESSOR), trial)

    logger.info('Output: (after post process)\n{}'.format(output))

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
        bench={
            "total": bench_pre + bench_post + bench_inference,
            "pre": bench_pre,
            "post": bench_post,
            "inference": bench_inference,
        },
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]
    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)
    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)
    logger.info(
        "Benchmark avg result(sec) for {} trials: pre_process: {}  inference: {} post_process: {}  Total: {}"
        .format(
            trial,
            bench_pre,
            bench_inference,
            bench_post,
            bench_pre + bench_post + bench_inference,
        ))
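
_timerfunc is not shown in these excerpts. Judging from the call sites above, it runs the given callable trial times and returns the result together with an elapsed-time figure in seconds; a hypothetical helper with that shape (returning the mean time is an assumption) might be:

import time

def timerfunc(func, args, trial=1):
    # Hypothetical equivalent of _timerfunc as used above: call func(*args)
    # `trial` times and return the last result plus the mean elapsed seconds.
    elapsed = []
    result = None
    for _ in range(trial):
        start = time.perf_counter()
        result = func(*args)
        elapsed.append(time.perf_counter() - start)
    return result, sum(elapsed) / len(elapsed)

result, bench = timerfunc(sorted, ([3, 1, 2],), trial=5)
print(result, bench)  # [1, 2, 3] and a small float
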
Code Example #15
def _export(config, restore_path, image_path):
    if restore_path is None:
        restore_file = executor.search_restore_filename(
            environment.CHECKPOINTS_DIR)
        restore_path = os.path.join(environment.CHECKPOINTS_DIR, restore_file)

    print("Restore from {}".format(restore_path))

    if not os.path.exists("{}.index".format(restore_path)):
        raise Exception("restore file {} dont exists.".format(restore_path))

    output_root_dir = os.path.join(environment.EXPERIMENT_DIR, "export")
    output_root_dir = os.path.join(output_root_dir,
                                   os.path.basename(restore_path))

    if not os.path.exists(output_root_dir):
        os.makedirs(output_root_dir)

    graph = tf.Graph()
    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict(
        (key.lower(), val) for key, val in config.NETWORK.items())

    with graph.as_default():

        model = ModelClass(
            classes=config.CLASSES,
            is_debug=config.IS_DEBUG,
            **network_kwargs,
        )

        is_training = tf.constant(False, name="is_training")

        images_placeholder, _ = model.placeholders()
        model.inference(images_placeholder, is_training)
        init_op = tf.compat.v1.global_variables_initializer()

        saver = tf.compat.v1.train.Saver(max_to_keep=50)

    session_config = tf.compat.v1.ConfigProto()
    sess = tf.compat.v1.Session(graph=graph, config=session_config)
    sess.run(init_op)

    saver.restore(sess, restore_path)

    main_output_dir = os.path.join(
        output_root_dir, "{}x{}".format(config.IMAGE_SIZE[0],
                                        config.IMAGE_SIZE[1]))
    if not os.path.exists(main_output_dir):
        os.makedirs(main_output_dir)

    # save inference values as npy files for runtime inference test and debug.
    if image_path:
        all_ops = _minimal_operations(sess)
        inference_values_output_dir = os.path.join(main_output_dir,
                                                   "inference_test_data")

        if not os.path.exists(inference_values_output_dir):
            os.makedirs(inference_values_output_dir)

        raw_image = load_image(image_path)
        image = _pre_process(raw_image, config.PRE_PROCESSOR,
                             config.DATA_FORMAT)
        images = np.expand_dims(image, axis=0)
        feed_dict = {
            images_placeholder: images,
        }

        all_outputs = []
        index = 0
        for op in all_ops:
            for op_output in op.outputs:
                # HACK: This is for TensorFlow bug workaround.
                # We can remove following 4 lines once it's been resolved in TensorFlow
                # Issue link: https://github.com/tensorflow/tensorflow/issues/36456
                if (not tf.config.experimental.list_physical_devices('GPU')
                        and "FusedBatchNormV3" in op_output.name and int(
                            op_output.name.split(":")[1]) in set(range(1, 6))):
                    continue
                val = sess.run(op_output.name, feed_dict=feed_dict)
                name = '%03d' % index + '_' + op_output.name.replace('/', '_')
                all_outputs.append({'val': val, 'name': name})
                index += 1

        _save_all_operation_outputs(image_path, inference_values_output_dir,
                                    image, raw_image, all_outputs,
                                    config.IMAGE_SIZE)

    yaml_names = config_util.save_yaml(main_output_dir, config)
    pb_name = executor.save_pb_file(sess, main_output_dir)

    message = """
Create pb and yaml files in: {}
pb: {}
yaml: {}, {}
""".format(main_output_dir, pb_name, *yaml_names)

    if image_path:
        message += "Create npy files in under `inference_test_data` folder \n"
        message += "npy: {}".format([d["name"] for d in all_outputs] + [
            "raw_image",
            "preprocessed_image",
        ])

    print(message)
    print("finish")

    return main_output_dir
Code Example #16
File: measure_latency.py  Project: ytfksw/blueoil
def _measure_time(config, restore_path, step_size):
    graph = tf.Graph()

    ModelClass = config.NETWORK_CLASS
    network_kwargs = dict(
        (key.lower(), val) for key, val in config.NETWORK.items())

    with graph.as_default():

        model = ModelClass(
            classes=config.CLASSES,
            is_debug=config.IS_DEBUG,
            **network_kwargs,
        )

        is_training = tf.constant(False, name="is_training")

        images_placeholder, labels_placeholder = model.placeholders()
        output = model.inference(images_placeholder, is_training)

        init_op = tf.compat.v1.global_variables_initializer()

        saver = tf.compat.v1.train.Saver()

    session_config = None  # tf.ConfigProto(log_device_placement=True)
    # session_config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    sess = tf.compat.v1.Session(graph=graph, config=session_config)
    sess.run(init_op)

    if restore_path:
        saver.restore(sess, restore_path)

    # Try to inference once before measure time.
    raw_image = np.random.randint(256,
                                  size=(
                                      config.IMAGE_SIZE[0],
                                      config.IMAGE_SIZE[1],
                                      3,
                                  )).astype('uint8')
    image = _pre_process(raw_image, config.PRE_PROCESSOR, config.DATA_FORMAT)
    images = np.expand_dims(image, axis=0)
    feed_dict = {
        images_placeholder: images,
    }
    output_np = sess.run(output, feed_dict=feed_dict)
    if config.POST_PROCESSOR:
        config.POST_PROCESSOR(**{"outputs": output_np})

    # measure time
    image_files_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "fixtures",
        "measure_latency_images", "*.jpg")

    image_files = glob.glob(image_files_path)
    overall_times = []
    only_network_times = []

    for test_step in range(step_size):
        index = test_step % len(image_files)
        image_file = image_files[index]
        raw_image = load_image(image_file)

        start_overall = time.time()

        image = _pre_process(raw_image, config.PRE_PROCESSOR,
                             config.DATA_FORMAT)
        images = np.expand_dims(image, axis=0)
        feed_dict = {
            images_placeholder: images,
        }

        start_only_network = time.time()
        output_np = sess.run(output, feed_dict=feed_dict)
        only_network_times.append(time.time() - start_only_network)

        if config.POST_PROCESSOR:
            config.POST_PROCESSOR(**{"outputs": output_np})

        overall_times.append(time.time() - start_overall)

    return overall_times, only_network_times
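
_measure_time returns two lists of per-step wall-clock times; the caller, which is not part of this excerpt, is expected to aggregate them. One simple way to summarize the measurements:

import numpy as np

# Sample values standing in for the lists returned by _measure_time above.
overall_times = [0.0123, 0.0118, 0.0131]
only_network_times = [0.0087, 0.0085, 0.0090]

print("overall  mean: {:.4f}s  std: {:.4f}s".format(
    np.mean(overall_times), np.std(overall_times)))
print("network  mean: {:.4f}s  std: {:.4f}s".format(
    np.mean(only_network_times), np.std(only_network_times)))
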
Code Example #17
    def __getitem__(self, i):
        imgs, labels = self.files_and_annotations()
        img = load_image(imgs[i])
        label = load_image(labels[i])

        return img, label
Code Example #18
File: div2k.py  Project: 2429581027/blueoil
    def __getitem__(self, i, type=None):
        target_file = self.files[i]
        image = load_image(target_file)

        return image, None
Code Example #19
File: run.py  Project: tk26eng/blueoil
def run_prediction(input_image, model, config_file, trial=1):
    if not input_image or not model or not config_file:
        logger.error('Please check usage with --help option')
        exit(1)

    config = load_yaml(config_file)

    # load the image
    image_data = load_image(input_image)
    raw_image = image_data

    # initialize Network
    nn = _init(model, config)

    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)

    # call functions once to exclude the first result which include some initializations
    init_output = _pre_process(image_data, pre_process, config.DATA_FORMAT)
    init_output = _run(nn, init_output)
    init_output = _post_process(init_output, post_process)

    results_total = []
    results_pre = []
    results_run = []
    results_post = []

    for _ in range(trial):
        # pre process for image
        output, bench_pre = _timerfunc(
            _pre_process, (image_data, pre_process, config.DATA_FORMAT))

        # run the model to inference
        output, bench_run = _timerfunc(_run, (nn, output))

        # pre process for output
        output, bench_post = _timerfunc(_post_process, (output, post_process))

        results_total.append(bench_pre + bench_run + bench_post)
        results_pre.append(bench_pre)
        results_run.append(bench_run)
        results_post.append(bench_post)

    time_stat = {
        "total": {
            "mean": np.mean(results_total),
            "std": np.std(results_total)
        },
        "pre": {
            "mean": np.mean(results_pre),
            "std": np.std(results_pre)
        },
        "post": {
            "mean": np.mean(results_post),
            "std": np.std(results_post)
        },
        "run": {
            "mean": np.mean(results_run),
            "std": np.std(results_run)
        },
    }

    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
        bench=time_stat,
    )

    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )

    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]
    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)
    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)
    logger.info("Benchmark avg result(sec) for {} trials".format(trial))
    logger.info(time_stat)